[Binary archive payload removed — only the tar member listing is recoverable:]
var/home/core/zuul-output/                      drwxr-xr-x  core/core
var/home/core/zuul-output/logs/                 drwxr-xr-x  core/core
var/home/core/zuul-output/logs/kubelet.log.gz   -rw-r--r--  core/core   (gzip-compressed kubelet log; binary contents not representable as text)
Gb7|fh%ZƆ@$߸YsKȨhԇu o!>' [(_kb }F&X<+Yp/~瞋$!gNcl3T-I֐1iՐ rƛ$>RZ78n{(hO.+m:uT&ZUE٩#XpF iW]&bΎc>jVPV_rQ  7`,ƹ9x)`S477v`, ?{W.\={pEX>L0 #noP1Q(9:A eh$S8V*JiE WWbs>;U_tKrwݪ5^gy`~^i8 ILε$ 9?Kɍ u9Ḹ65 ^vx}gIaC- Ǘx4>6?-Ɂ !1?*o~yAwUZ^Mr,J 苒:]ccp}7+q\8o*(ID1f))Be <%2ȬZ21d)D(3qbrtU[5Z;e]*( F:XR" E"͠Lh+|l#5sT@8*K)xBˉ~;|I|C_?&姃d9k9pqWIdoț׫ yeg9Xvp6ׅvDj9X7wR?A -$8bV//W BR4Xd?:?qŭGV*J. wtS9 һxN;03}0ϩR9^UMuawӎ+T{sT>]*gZrc#.81簶(i&%22OMBlg,JDJ_M211M(~TfVv%{ܴ";uyK#qbۈ+*eq⟾lw};?p OfK.J圗1KPUV0չL"O#R|qYY>v[Μ߿e-䨟9"h*~}d H$pHY D(U(zx6Mq8۟/VJ\!NUɤʄPqXP[=v4GѸP-͚z&4Vօnm|Q?)௅s6?P^} RFa$NP'Nosp`kω\+BBh #taCbm,V{}O2ҟ^\,* GzKM_?-Am7I]T{9L~yky;BBG@]QV%^u]ت?Z~,t9I]+?5SqGyE$4uj҄.KWƃ7c;zlYBtvZ|l->ol7j#TI嫕\9k]hN%bY,}>ԉ-oB/iz76j皭pFO#.wl[bty ee۶#|Yۙv.w]q3`0hzwXam:[SYfä"vZkA O[B КK]ݨXw˛ &tMa3Ѣ cuF z.Gv b^8][\IqҒt9B=ΛSHf ^`Gg+h 6j *jjp`* ?No~4xWPքuov#XpadǛy%6'GuiX ~͹Jc~KQ+tВTF/{S#\ޅvOD,YxLvSo^A=nV`dOC D8b&s&;B)j 'B%:9ۏ?6W?fN̕}7cݏJImG{8`![ m:vljt6UiNQW7f(/ Z)>50k1N& }4>zIŌD.j*C2vu{OtlWfmṧlS|խRW8t;NMi+$ |܍ylj~ 7]T 948FQ9BasZF ~-,=EsZS_ޙh|=lir 3^kN!ۼo/10!\zi>p{t"ym=F)t1j3ܹ(15$DET(|=@j\oZw{KHy78g#jq O/e(dfF-(i"C;\<ţgT%q2 &F\>5G|vqz6./ Μg "$nû|ysQBiz$U\KPʧ@aYSg_z~q;mGW" W"IX6NV+0fPƽqU$/xV@[`3Itjϻc*H 6.$>LSm-PEjCFD(L+%,.щf.aMw5}`YTbJMϿVE+dVY9y:Upֆ*qZYfGYj5 X3.6B5Ȫ}'ާ~ngX)ՀO8+ј9rxC2sܴɓ,pÞMJ _T6r:&-$;Jq^e8d`FRJqGFfm5 Є{jG֫k)[u Y@ *X6kKoWT9֪ea[F܄RxbQ'h;׈$GݲȯK$(,&]wthr"}૟~x݋+hV7l1 pv&_i$ӐEh2̈Fqč&Wi&a'2OMBR&΢DDd:aB*cbPΔTf H?篠-Y39-ᯔס~G%vFFns1[kw-͍Htqc0;v_4 ?$Y.I%W9bl)$;:\L|g~r{O"~Lǟ "t ˕M4(BHzR]Fݭm]rKv2{z* /BWՐ&dXVd "i@l}.(Yy~7 AͼlZL`- cV[@B*R X+Jw(b<ɘNAr(PX.8< 9LA#^q+S`L| r2ugpB\B̭Ǻˤ?`B 4ƙ!!=qeH_*Q'XpodDZk%,7uL eߛ1HU;ʪ=vXϹ0m~}BH]R gU%5.bn+X:$F4\Z="s-:cnzA*DjHTP̓5t>*م7PppȒā@q֙I'y *ğ\Dﬔ ,K+Byc=&$'v c+ػzP7/R`vNF/[ۊz݋iCҭ*^$Ǚw^MtTa`=vfVI(=#m,=;x{vE.jcvF]8k Vv8ᄚPÉ!$#JSjgP.>gɌc"2-.}t#;0f&?Ś" }h}YwX>~!6nٽi(_s@Bj}QX]%z#a,-U$~fZ x Yű횋S&2"'\u1UZg!}k ?G!U!W|nϊsm?A垻qǬLF%N)Y!h"he9L- !ضkZP\l#m-W~;2)`u(K`Jwb"|%n@'<(D5d uךx̍Gu f0UUF{HswaGPE-sa0lK{#*ԿiY! e5 e0~&LxחeWF ~I)2{ шX;E!%k?baz}|Z-WqVI d qkԩG;,H54-^@s.cy0?@fZgP /# иI1Krty]pr^Kt"!Kē$G*1c 3j9|}/l@[wI >:Tήh, 8cV:y pe󊋈S/G>:MyfWfv]CumJPsP]7;.%(Df'i1,Пw[,WtBk1JP4%b4eSnŀ G{֑gbKHat(I{ERh1z1Q h0w_u`>l%c\uqeN:GTS/]&W;QYX|%5;m]a1xJtkǛ|UԖAŦp'^-Tf;h `6d|#Y [+~"OMN6|Y*Zk-[+x7^,ynvWyDiN rgy"iW`s{Jz߾٘",CP̲ z& p2g Qv5UX*JIodl= 0kB?ϻX+ øV^nOævZw2n yЫYK;\y *262e >qFI38[S앶G*?խ5"5NutUސXf:p0$|3j,޲y>ˋ|S1X]MiW6iSYB@6x_ [ )!Ow9:{:TFy"C0^$GԁW#`MM2}@:4΄Z-lu޼S.޷ˢŔiZ9(Q qs=Aأ<}NT)IM %hBT ,]Cбi[_mvvJב2v\Bn.z+(4;+0:_ԬCF/ЯI?@ a$=A!Tci2eRDfk*(b MGLK$6)P*E%,tU~!pwޑlybJ<\VA]L4拗r]^{+!J-w! i\HdT%MDMDN1Nڿ ,VlYځrSY ;u?* qBP BfY 9A8vX2>Z "m( yVg)& f5WT')\IR@r Rr 7 )Y%LsSe= Pj&3M %4Dۇ >>ڎM T~<%ҪǴ?];VD\[MZ5Ql|4^a E/S0$څ J['2KR9a[o$:&qAj'|Ժe|:~X%yN&)3mNPV)K]x*{ղ=R:yy;$yF1Y';+K` F 4>%LkUH`Qڒ1c/tGՉ$-JrZwΐ@9ѩ4:R@Ɂ[y@okuʹR^wL+k, uH%Ѻ&\jTcMzͿFg<}(S4wN'OXӃeRR-T~p#$wɏ?{gJʁ^swTi1 a MF#*(].+&FLsaJs8媎<fIsxgѾXFEC[.uZF$[:('<`^dF 'ں|Z8(qR$Be8Q8=WS)H)t*Bh"v| ?VI9&7Ą L={w'ѩƘPYܦ«{ĩ}(h}ZHwAu5a(!BkEBPl-BX0$PJh >z=VJR/Yeh21%tDԏ+*UQ}:EC NA}=@S G=VhYK#~Ԣ;\p%yn߻$ᆥ&XQtA^&[(7h@\DO#{Ӑ5aGQBM .}%31hۜd3drjNZ)$('Rf2KV\BkBdh-[ <;K͏1sWNYzv(S5$1?+Vo!㇏2F/OpG U"a6Y44f#^!` a۪V!Bk:@a?A\0Q#z߇|NNGGpxf1o-\#6$MK h-;9|VNJ1Z'L[h\l(}1Q~vsGXkNf.R*mÊ&LK+R#k%Bk`EOETZ+KˇUY.DJ%~RS\N:e ]O,Ũ <6-j{۔X@ct;vST8 <1P=DgfuF#c l=dX婢X::n#`fu8>j ?cd:ǣA5|6v9g*\ף9kfIpf =0?OU1zmN6m/Kzl? 
~45'oP `2.#!*XיrL CQ}Iۆ0Vb\A+)ŒOD bGI?a+89f_a`ZǍ;e:F1jxY^X5 10m7gIHd#1m} 8Zu>A"Sݙ**JAZ H^Fk"?ofrqI:AA }gO߳m>ˠ!i%ɑЏ>tT @ZB+sI9(;MCoѤG 31mvm"iBlsr\ɹR:Lph<>-IRpǾ/1FH%ܞJ} @HL饪;{#,mx`{M_ Ge?RRQH$e/g|D?~oN̉tpٿZxw״c.HJ5-f LusQGmZ@r'Ÿ"*.%)DeQ6xjC<□2;dkĜ7R&OJ,iWף:6!Wb4܎qB˨RPWV0DsʚB0ρy$^_<7;ݜk\3LUqْFgĝ̭OkFbX*A_ninsE N6oƢcƤvl([4ƣ:ŀ" {̈K(O# 1-?$-È1 \$ӥMXhWjI;j89)2+\DWw<え\d3%-<0s1]?'".sYDȝ,?rs|:;LgIO~G7TH˻PwY?c_mQ?#KHC m{׿Pz >hn wJX 8%K7pTSSģWfPrIֵ:o8s#O|L\gV2ڮng(R/΅ަvמּvstg2(g]L"ݿᕂXW05OT$?֧DU zhm.`XSٷTj`߄䀑 Ao 8f%{4fŮ>)$y f>w/DµOis1AMWD`ꙺ}Z4R2 {uAAoe-cH)~ao<@)G^>0?-eib%)"9$(RWn!L*s71% ^pF#bf').FVw>T$P4]2//isF_ ͔|i5Ib,>jlw!(%(mχc0Xa{Y,v7b ʓ;18&o@I6Ԥ㟇g =,RY8BRnT0_Mt~UQlj/o`Y[uT="uЮ@0]5q~jz}EeD!F3%"b"A0h_)(Tw# %׹;(|?_e߬~*>=J0$Z k[1b^ LL_M%z[h?S)^ۢK)9"$84Ia.a0/lp .Lx,son2%)rȐ ISXc ?fk6H!P[@-O-W 7Siys֘a>T)_}=6;?I3935?sHbpT[dJ#5j0+EI^b4q^ %f989 >fin8^Kz*}1õ<제-kXvX_ 6zI^BAI~襈;GzO0Ohw)(IWX)Į-iTY@i-C2BF5V\'p1nʪfBۇ=Sk^-3ԫ<3f.οi{xSI`Y,4,10Jne`[0ÝZbcTǡ:18!W]VM.[ 1y\yϺpW Cǜ#Z?ܿ< W߳= ߳=k~:a" IH(h(!$E k"INPN-!y2*v'f S!xfi/l\."$F1DG35b`QKnti5rS,;`")lRcɉ &38 CIXĵk@jI/p 0SBVJ aq>T^UWyKpoVa̝Ⱥ*d UrɼX [2a˴l4P1Rt$M) 4nJ zq]3ZcȠY.%G&SC!ȖT.R|bѥ%4Ǹy.Gh5ZrvDSw63~λ*&I!<ıoG֘$m~|˄zfiϿO9i8p<.J;g˿}<= O(+0( QZ QPM f>Z? eAՒؾT Xh[jF%`pҹr)(4X! :߆_'FF O$,F <f:x𝹬{]O AП+`„1 /s>@ȃ9DQ5e/jhˋ٤G^ia0^Nf۠EςjO~;Y!4CkNxPFCst|oTǝ",~.(:cShx5OF- `i慳0&\ )mŲ.Z;',_baiӷ[o[`b&K#MAh&xF K ΰR'4f'{"܎48؝d Aah$bqFu`5k^PHJ(6YBGTZp[˶T^LUT^LU3S<`RyX)儲3"Pbp3yhc=V<$T<$~j&{]kl 6pxY\LI8أ4:m/ lJ#Agwm%t!bų8훁 -ʹ0X, !Dt4T g–MXm]h{>Tt^X}`nm= 3"?f?+"Q2r wrV4Ys!M¤x+W.Ra2% NFq/S-jZ̜SCzHulL{6ˁm^1SeȠJ&Ho"慷vf+f첑yu1o-x{ҟV9[m1cJ d%A+U B*=|Z/J5<@vJѴMÔM55_|K⼠JJ;E?fJ-[RMb LҘ{==zOPiŒ(M,MWpOZ-C$1qVڗ Л7T)*P*W@*\5 Ti_Q45p0H4uk'!! M"$1&D{Ӷ8[K*[K|1kl'V¥Q铙Vs:͟61͗qRtOu4W}d"} =Tѝ =9A{EHGn B9+,+9?Pqnu#r0ަȽۯOUu;,IqL5ϠWkYBdFYE%3yrߦٝґMt:t2b N4|ም [cF 8 |<.t]=T K*gg~>Fv<WukzyWWPJ_" E38XeO4,fAL}DH|9Kί4Ҷ%{[[UKq|/dC1B-xY",HMr``qk5vJsm`*sͮX&Y#,6t`»H,0w`D81&C ̂ F|tV+jR1*nO`_˛pJiSh"=? <ːPEE Gh4~jwN:D~6 䓁3PG-5;.7MV;"_د1YFtS_9x N?ȠT |%u/8Km ZGEm[xĹ~-6VP h-q*Sp[2Rz#wj/ ]`i n҃gb8ji ej$ Tp!U8XrwkVzYyˌbVb %AXGWV W4!+^6*A7#o 'ޱgs/I5LNՒ[*@`J&%$ᔴy4E^ Yk$ 0,^ oCGǪa ֏:~GG^:=䬎enE6'YmEjNk)p7\ tY f |si߭OnQUKt磊0v7oobv6ELkSh Eo֧ܝ3C"8\@Q-+4DT?"ۀ,2Z cm -ȱJN&D7s7}NOP|yIuXkܗ)̬Oٔ> r$& xqf '2E!& )*#яM󪬀~PĻkf'HUPE8pf,1dg\'YV5NppQMPBK'ְگhØДu~?+;*EB4ٷ7+S'矆*qvq(FR28BRHnEud4[<ml bgB@*݆]UR@|m3F$-nĽ=,fhsB3Ehߠڽ;"&?$"=+I `4)x"nD&q(DhE&]C.) !<5 KmAMx5 dFҙj DWa]wsMpS'mr %"5A 8R,',iIcxp`oc[J-Zsgn%զ]\N ~*!E!%rqk zFo4XˣM2#ջEWYNdzE7ꭾs-`Vi-$);I6Q'7ky䴔 =YL,^9$#/~F-ĴfAQBA P,GtȖ>Drf}нs蕎c-~Ze`;{/(:ƀ{\+0Q`wN5:[)0GQrZ̚ D>-/~=RP%4l`7qe<j1sEQS=-f ѝu;~6Bi$kPJ0`3KrU.Y% W͒&6ӊ[oJ#e+ clONĩJ:bQPˁNTblZBU،ϖsە4`-QV= oGQW3 *?ÿj>o** ,'aspNK Wc|'Ejx.Oۄ O%5S~ٙMG6g@A $4b4VZEL8iR !X2tx;y/wO{bmҮc*8KA0LC6+c!NPU%.dfp-<{ĭ:$ȭzJ$! ݤdoj,XdxZ4~*r+7^q B16~yA(~Omf);]2#>&ـbO#l86lN4~m{ ׎{ ǽe_On-^W1EeN{ب>?b ;1$6-G9|i_U{V[l$ xE,fl̿oɱys e٧Q+h l4J j Z^ïɿQOD`BLO2-:/Ɠ?Zr|O1_*Քbqޒ>y%˧Dy-E cr3_.HOmSF;.l1N?/A~~Y^e}X.Njٖ,fGkv7mNRfq$ ui%}@2ׄ((8v|}Gų vns{yQx)3,M$" F̵EC??kw.*,qCdj80qfZy]pG m^2RH&cU$1hc5S{!iB 7{0*_->*h >AqҎDmY Nkle"(nͼU˹S4/N\f2PIbr \N !j>N/ x{zΡ \q%9˟@ӌR Kwc4uD'e8bO`۸)"`PB[9xl2aۥu~6u }iDžec&û:jX~|-Ћ]/ K HBp'Zՠ{8J9!/M ].zb_m B! ;+"IDRia"94c=MQ!?q͖%AS15l_X/Yyh͟nfDmȐ e BF@OMmrE++P,4QJHE]{L809^8 W%K|r}MZT_Q*b@ #it~Cv Ũ0ⰷ">`p6OGRf0lrz&. cB@ߦ7]G8NQ?ﺻ Џ ޼pL)d;^8VRҿLevLXEʓJ˞ })W_ܜڲ?}xEv ̾vU2ܢi>-((>xk'V ,yʔY9jo#%]lTuƃ^+*om7Rʥ{1P]9+O/2b2/^6'kx/܊5AI;^R4!!-mf(w:.7AF.цDރNn5kQ`)A'\۬4]V)FNsIM%$K똔alwM8aiGQmEiNۖaNjHX][ z7*^w-2-^໴^̮k,<}`1[7_\ {v;Z`##N,ɹ{FOq؆? 
݊T3G!f&|ru᫏۪SceCn-+'?R~fvL{vI8Z-9uIEt膺M16o?"_Q!k%oW=p|YV~RTqRI|cTtK[@/d tݏqNd 5<)ld)̢]}!q0ʎcaTѫ{4  cQ'qz Dǹ~8<\\_95̾v?͍g9!pFk=Y0H،d\|j ! gjuq> x^hr̈nU-qvPD/ e6PV /nJ^Bo+츹/1Zj~;TnOfh#'kh}[kWC#* [;N0pmE&wI>iE,@FU^d(NPPg n"T n)KCP2)fs6t $m=fJ1^{jQff[rZq`9Tg';jkA;rj̵y5^6Mf1._f]QR+%#3nj{j%4/W[n ZFcbk8 j,61ܔ%ڱ3al쮜 FK#^$~K..Dz2kr&-maS{,[/7vy j]^|=#t 9Z/3{$QZ(T>sjkZ*.s[yw*H 6aڌekLN0<{#h@SVǺX%Q6c~M-[Ek4DJ0z\ 6Dn1;!,1 P, :CI3c{6=V޸f˳&<ĕ'<֪B'4 rp𔓿uҝX U5b{dy kF0k㌵rqW~W@F&>3[[ck%7gDŽɯ.ˢWڡI7J|ޮCː12`$)hLNI)ty?ßګoӷ ;1^*m>9ijHJ^xH .jn_Ә/+ ) O˘Cw0Ss ?j}0+)IJAj^y7wYI G4J-7)!CG@[ZXhC*X==,(d>_3ȫ#χZ pk"iu0NN-hpvz5¨Wڵ<2 |`D}k\݄]@dc̛tltAG-Kh\'OI({ƹLNu !8MJV+a;s1N m9=m&UKJXD h۶(T+c<"Z3`t&AGT*V1R0e8ڳƈSZ7}#@o4wRewk 8}`ɠMZ!YQqJߺ%n(b^= 2i@1# 6Ÿ^$去.gī/4NX Ɯ`&h/jHhjBX`t3J,Jg"diHcV1'I-0n!뜰I`[a"# GD9 9Fs5ju% Z \>W*}q$<ؒ6q]Vҏ6V,'Uby ׺5S170V֌$b2s~NWerPA=؃v򏼿O`v?f$g|g㮯&֝ƕo4^H$:";'|Rzs>ZCl@ɵRY#Hd2<G"в@B$8Kv }7{]q}z=9v|& l5ߞt*qћワ>Ր8PŀǤ56ҝ>mFh!$]iVqbȁk|+%UU-԰*EH͋jX?7VZB:֢c{J}7Ա~Y}UOcMq \"\}ɕ6FZD uZ_|B2)]+)ҬXcKkPhTCe <mcrf&7w'&CQmzEyxM&%VK$G03>0 #sXrMs@fh>`ĀTixId* Ow99h'fS(Dc#FBFI%T̀$V mm*u&騒 \q 5Q#oQѼP!;n]k焆{"GG)㠹97n35̮UO-!6}דYI^_:χ!ɧ9=>c{:=&Ik{z0s9ɣ_2zBEjEsND둰eo|>| 1$ ? )P6d)8`B"}8!'b\ ip;ZsyڞLJw&quy$#< =JNQ ~WHױy=-I:I;oS>g1dfάcF"KtҞw1,]Bj5yfr6.c|4A"ؖߟkq~ xm|H#F>uW!3,܁Zڢ~c+zf8@ ;KG'Fǘ,pEb0]ԟa֊5|߬kzGĎnnQ**95\]K~jKRAzQ0z3PX]5\Z(zѫiI=w*87<)7XcOfXIڕnܴp*ŢK7ڸr :1ҟЧ%Р\.xέ):@ߏt-fНt#=9@岘~C"0S{]CIIؚ3_k5{_8,εt/1=[HYc3tV|czd/Bg}k1$N7| IcvP Ǝlm$ q#9S%3dsTQd`y5Η7%'+}|qwÚ'zq;3Djo$@K-:a/4P.l.5gntM+|h\>+&u_QI|Uv E`UWU}UH>F,MξNfy]/2 rZhL: TPpl9FtgkwZ.IGH(omI7 ۲V9GIq iDֆēN1o->)o$wPn}Hsv U%&l/Ab"1$H!|`4S}1ě`{yl@X5߸q y~OƱfA$mNjW[JҢŸqO>[nt-[c־k3(<(;ʣ莺>7dmYI)IlX1L A&z):(IRŘ@z .- f9+氒ȠO*qn>V9Baw:e57ߦIlf"Os92(ܬjɧ%3v9)dzA2\sH-nk kG7 5f3B5P3:`g@hU[I9cZ2!]l 얉7%z:#6Zs,HPYV`W3jj&s1Hy::>t}cM7}̾љ4\A'77PG4AFk d<6қ$# X Q"@ʷt0 h!2JutL(QknC}JbX@"SdqHԳF_~,Ι+J\qfԒVd/Ma GJ Lk'ɶ h!=$csfρ= !]<0~;rvNsW黳~vN s뢚ƏĝsRB)Pwr0a&& [ jMh#㪉PR * c^Cnjw}r:[_Qzpe=0C0?!R,q2ҵ[Ph6)pٗ9FooɆ$RM⾺dUJEbƦPC*q!ʎԹ,_Ka񺛤FNJb=3y߮ZMi'9G=ҍ,^wnS9"z\? Z*_.fyx2]~e@6ycҤy(4Nf`qYͳ?zP貶uTO7/O݋QH(?wIQԯW'L&^o7m^.M~ӫHf|k@tPB{؉[Eh#fռWG|%MB!cBWџϞ[+1Y/FՖuC-[#dMCqAΤxduu7!͎k)D~5k0 a p\ {tw,džUN8Ci{F+V!b: j[Ҭbt+BaH 4qln`L'T=|ː ڙ|j:r/ie^e_i昊f\m}d"B9]]ȃ?.cp `OiN\7nlњ!)]֯mWo [\ӿk}*~?^>\HxqL:j2x}(WA>,~fNO:n+}# 5B~:O[NĘy:ŵm A#{8(e +tftl-8"YLaTgi8e B?O]=|Vfvo:|r/Gw]<53y`89 4=_rGT7&T!X_UfybWdsG<2ر7jd8H,h Y7ݍeFV؄`3hfq٩\;=,L7Fz' sS,o~uؠaw݋qgڧ4in$9A7/ +'\Y3xrqd Zg5wn5XoXZ`X7mXj]8`Wz/ƕ[iz⢧yό!O4A݊}$#]od; =e#<H~ yz~ #ox}E+YtT#lf -~@(OHU^.ĸzK"@P(7+A黎zg睿92o?}>!ƃWӉL*]H4/(y.6Bo6okFƂ \HB kbBOLd1/70t~O|ı5Z iƒ_t||ŚVŕ`T9of~>_d!Bq/GpH"c% @`ø9ru=5Aq5d}. -op1ԇ04`;aG49nEwN[6.6acىUtْ!ɑOsb5?ž./א@ A!bKs:->*dAg-+/1BD["2V{f!ko}mI7p}P_~/No+..(n{uiE7 㱃F-}<7IkX8S7ϜA>'~z^3 ۴K E*6^u>0.ؾ$=Y pFWG"E#rKSSZqiΘz[HABf ;x$<'~I䉃f.g:lHxFdJYv+C֫IWU<c% XJV # d(_Ė-QkT\KZM 5ҢimZ,S*q Dzpu'p}HM.C4,nڤ]JhA( -,1I0.[Łɾ1&LxDQe(jHBrk+O'+RhLkXFO,~6IQB8\jdAW/*:\dLUlFBʖu2Cpa I|#! "闁ı$o옚i I7MPDlps(նyu dRK YɘPĞDo :g*&RbBDŬ˯ڪcs*2-S.g§* jiQH{s^K&3`YA~~>YQP=X%MURMlWQɊ$W<_IP h ˋ">ϦO3]3c19gNjN/7shHş`/?6}i̞>H}SK5[)-¾FL1,OXjN6˥ꞫZ=(xvӟ(ܭڟf8>V `I7/iI5>Ge&-.n5qqO} z3^5YWڱa~%)8f>/fY|9}q.% UB~rUƦvAAܾ\{fl5(7ס3;R5sd qͫ79 fc23 )՘FtԍmF0HCUB.dRj4yN}B>T\}DQ-X?'EmR܇_[{S2)8cu!7Jy ܡ,X+TPjhNAudͬT9RUc\;E[ 綞+n6l}xd>7 nSS/J@Ow)%4_өU\;$*AS復rjx袑.SX_R:t7Fj(H"ݣ{ iFH3{Gp}VެuԽA _m͈cm΍J3"|¸sKD0 &7;l&Z+|ojCnvE8욝fgUoι{(' .緟D-:ͫMzJ<*Q#78kYT | a-TFJw-egx>#Smw({ Y֊ɾyl-8RBu<L֫7pyqIr@hdF)*76Dg]ՔwԱK6$F xxnbT3]Sæ1*s-f gE9}rS͂nlw`,~%Ηd[ϐqj2羸|X`\vċ q1qbL$=&YZR~{eG{0~lٟpŮ&\-3.~b*"lyE??j#?ci&!sBsQ>|T^i ɤѼb!H&~OM=&Փ-;612GzzUg<&|Uׄ~uZy(ÌgDs4PAgRINz=fUOl#8hRfGCkTaC)lweJ 0{B=! 
yp a&iG0{0{B>fog l%90WXJTm۶.^CLE\b/s66n21&1$ẙ1Hka6R%X O,AES5ϔ5yBx6| ջb{I@DO[qlJ( 'Men6z4gn>[Ɍ({  Q;q{,C _N>!8Sp~ْioz|DͻDS> C0[#gr[-8pFjsA̋n6;Sy6e6,(##=Y@4o?埗Ӆ>-k^-'|L>d(v~ %EpT<ԍȚ|Qs`26f":@T3`($>l_qQ;)Ŷnp9P.?o/XWm?Nm8^Hjh)`{4詂u-i,=w`1^O=.ޛҵ 0cBoarR.+=^-,Ng06ǚK |Ryݿ 5ɑ;DG'k&p|*_~~ 9̀CJ0wEmOvoA9DSERam[>YF8(n"=xo=fD 7j*]aJa|!7G4nUo&;o!]Y'*=MO|%v+;H.h%vFl:"nK˩-E#zӿ+ؑ"v:xQܣ|oJVbwXEa%v_ 9 +C%ꎖC0"+[ݮzq%vi1RzEu/[= { TZlI֬D8MyS"l=+E|5W!ԛf0SO%f@C r 2wDlanѴ](T9RgLCido\Md'ҽl XwSϣ/G978Ξ^\ŷn:&q]1saSXL7=gۥGue*S|o,`Ri0%l\4uA{ey5Vl2?[ nh ntqX+ۯfkۇWgeƭq]ij>DRRݣG0hai!}Qh{2Hz+ 3Z2bcu?mA glCM^b$ט?_n= ]K!^`: d~|[Fe217j0ٺNjv1J.)Y՜9Ƹz0;uhFFr,;-BQ~6]{! p4;@?1eӻc@~2N^b!/S|-)xg|011yDEvmVb9T].b6%%sYQЖ So(bUʥ4%C55[d9(di)d‡޼{_|+n؆vAnyZ;sЁR>J1̃R*$i L3hwcl{30ᫍ<ڬe*Be2ԇ̆ ̜]b:|0V+3 7%Q®oC\ٜD̳Ň+.X o|˻˫vF4Vx:5* ,L)hsl3Z0АFSG' s`B Z$óh_/~̅(z%SxSK|6"iqA+0|骋Fz3`3mtٽϼ01!t~Mb*?ykp٫NEw]w>}'rt;9 Kގ8WNh.]Lna=K0%Oяc4q{/Gthd&E|A O^oy>>{fgtsb#iSejRk..~BvƐwut98=TKV!9#ޞUA&PRiv%zΜ:/03utvC.|2%q 4}5àt fί1aM&> cskUf5s#5I>ѐmr ؗl8ȿĹkJIƐvϚh4׳&kJC/3&|eH1IN7tL+U^Y8wK(,'̏ FLhŞB\p#eAL=Bv=Ps0?}v)SeVқ=V?4Tؖ\ hNuԙTFZ+M;E/Bˑ|+aI&DrFP ܥ8U\:3˻7^a3IXٻ8n$W>Fi=l837҈z(˶~&H,vU]6EAD>q^Rnz= vCQ v: 2S4AFǞG7Y\K/!9_yXb_cXcnjF0]m`(i©NlCx!B٠Bh}A+9szPGi. l0ՙK[CF=CWQy@6emKϿKS}bW\8R U;CRǰab$A4_%e.lo> A$Ic+FmXg[84f\B 0%)e5Dy `i87w$4M#?>/Q`Hl/"U0& XǝL(B׈H9\L!&'sasXaRG`뺪jq3zh1렣&w Bm% 㐋8|<֋ ٰ5r(F~ț^}j7`Bd{VuLe:8|-z šl䌡z4jt#eeM9k)ueek|׹·8&q.'[t~NyG3lw]zn3qluw*"ڎaeN}F\'5h [b!$8` wuq#L?n{ pxՠ~ {0NpyrހozǴWKN 6qcKNulTiʑm exx̄K ؾwR4!t˧V&pE;C.:(NGGBd+7M!9:#zMal'!W}әX;y?ɄJ- _dTP[A OwObPfI{#0vInj@OG`~cRЦGN`Ɯ~^ O09kfq?`?ļ(RUSo]AU^\TJj^bptM?)7N;kF.2i ةG՜U ǚsarٵgrn@qđoS"o,1w)%g(n ?P?ڜ$4MĜ}7dV>:`N8[rRD|=%I{$IG$ ޻~&wн$n8{$bra?CO؇+нsN{Wp?\51(FSYSs8?tw8,>-($vgŖ|~1|&߬6W\G f?ɞɲƇǓƫn$mi{npdfy" +8zJ:A6feoG<cb7ohf߮8$GYc0V''efj9 FS UiKjIYw i޲w[vf'5;]zfn7fQ;i 'gn{Cza W枧@m8s<> Fw*T|^ٙ.թG<BC3?olzՍ4CD^V@2Biq?6ej*3xV™ 25-.nҁ|<̓FVr>+S0\[  v>#_-?H/vIk1zij ܃&k:W3ܘqHӢZ7X4M4p{8ŋ$rfX7 U|Q#$BM?6*77Ǧfန7bjN=frU?Js-u1ghp˼[A~0AfKHLu}-~ j0vLiHjR7/Y3p5b%Dt`l L76X0ۤ}ǡ钦K&AM߈It4 Yřs0 ڈ "vM+e5b^(O/]Dxhex:AǗ> uGjY׍݁|aB*{4Φ&]Xck Ҳo -;Ee}b%F#-#,_חo/ L Im~YKx|WoDC2r'.dà8XA=qR0 k0P}0II0üFEu܎A [SmlB΋}CrcKF"4r o%eO"ēHv#w D9~X,aI)g`1#:] 2F"sl_V5x9߫vOgۖ?0݃&U#w[ϸEwu:ֱIvkC]a_z2Y98rAz#!|Ѷ~X"p蝙x6UӰl~N(Z{3_m8as]էDbc}U1S,cHcٖa$P'JiWd&ZwKտ_W`E1+f>\!rT\)PlsPqZMfv\~QrS=muJGg⌽đ 37"۳ȱ@Z2^ұWz^6l;uO$W&:۹ H?1]lW!eِW#KdFRIjf40 hPK†sq(hTgmeb+>Ao]J;u]>e}fCۺNl[$}{Yzb[73afU 1!T3 &ž¡ì4b_ʼghӳf>ޤDk~{ͅJNoq a'"@=2 tAtћ c}ҜfGagǛU'M[ )h $M [7SvccZ6hAN$)R hANoլIݼW#  oQ~AN=UC O ZSrڊ| {$[(Y 籜W|5b77!g{N;r=](Y vF<>n jĕxluu+__Sm~t꯾6# gew_r]߿gfe>_dޘ]?ke'`)yn-I8RPh)D S ՚[$`Gx3YM'=8zLv:ѳ$qM OG#_-xpY}}ۚd3nETInpc 2ypoɄʄCӻ1;@d>;uEJ 0r\;mϴg}>ucHK\tymڌk9W웬 uzv86g6`؂7Du݌Gzw0 *>F~Il#8ʹ@A[ vqI wRVp]otcD,1r4{IDzijh25@Bo8{$8BĎTogjO B#v~ }&2qwȈhnƹFGkhWD<0@ PJ9X^c`C?m7JFUzI OECW[*@>tO).ʳ^U [1QL1NW[f}]ԭZzijњ舜GGr *N 7f?7uC&eS#i?~Ō/ _7o? 
O\WПb&Sg8c;EC;ӕj}D$rozgbTUhzirNJ 'Nlkb "v>pMz@FYNzqtXc9|ٞV_|(Ap8U aߣ3bBd|^ۑc92`M]l`Vq !xZ&V9+F /J9՜3%S$Cær؎|#ǓV*v#6a∉h +{٠R/@)F)"a_a_pc#}¡I ӷ:GAlT;u j>`8wh7 Z!НlwH!PXWV V ٺvXscDh9M0sCG4vNsM!,]ci]`߮վ~񭿖*l\)l3>H߬k\?TRr|[ˀy׀}{ɴtyZ>5 ѣBfp5Tqo2}m#'QYT"xhOS^K!df{aG CB&&uf02ҥPڮGHO68Gјh r;hۑsmuzi&m$IC_|Lrwŗĉb$;`UoHɤhTX`X4ɮ~zoBFO^2iLp}yGzfLPm `X6l>JKМ  H @ +b-6&4PZb@oZ:g[5Voj pz6;lgAcYg:P0 /n )=vRjGDJG " :+)oP{y0x70x{tW?sްjG0͘g'^#0-ww +i h}e_$.NOc3>j9{:Ot~r]l\q_Ȯ(ʛeLJ]#Ta[M,X`c6$LFgײ`TJAUC80$`lv;}bh{qn@%fo7<^@U~;2iGc 3%gIk1w!_ GOO%rbɴzf;X^A ?m)KؑhL%S7c9-2Bv#ZI9VۧW*mr{Ɏu0C^^΋ӻIIy,ˈή>zm_ MÓu-P+~Gnn|bvw˜^M%JlƛwbCc*$֥, emug]KiRX'nQ\K{aRwæ :0ugqBKF=ieK9aQ"* : 6Ģ`F zYHO9C2?/Hˍ_fT9;GlP[.ct:U T%a3Ͼ&q-ɢ !K0 p&JHd |6]R>}yN;:(]+gS`~$V**}|HήܼP%;X]w53TN!eu}ZKqBcj3'e f́PZj?Y/w>N SFy6PoPވkqyzmuԀmQ0PRxyi c/F32F :lYdYſb`!C᭎--7P.&PN;GHaN[|dOX I 7YA1mx}֨>A x1w _ }DmF U ر:\d(,LiLȴ;!v7!v'vC}Y B@>XtFW]߿y39Y^,IzuZo7/n#%&Zy|g}IHE]ۧWQ(iRQxّrAx 8"m:LT< YE{8S6G4%,F5o˜##V1y9o(NĴG\@" qFJaA 335tЎ)547J[m,̴޼rUؙ)i> < 3ݎ|6  Lal+6 ` k7 _U}os >/30Jx*)]|,1~1+y? j[,Vj&n=s! HoڙdM)0~x*r{7ZQ6Y-O\Ňu[h3uax9uݷBI>ZQQxӫj$U SuS_x oUv 6FA浼45u]7"nʋGskem8 )+Mh6ώA{{BftR$!AC:o)4z{9Ǫ{{? sdG>h;q j?O!v{%6{.j{[qWfxB-9vVcV[藓weOw*gd5W *f3 ygbkbkbkbkݯeˤ('0!ՔzS,ESM}zUv2TWy%3-<)WI*_,OO7b4F^=sř}~S-*r9MVC;u sc:V'Z2('/Rs6_>A޵07cJګ\ƹC9~yJvt~W'.S%f@:yu]t{3+w 9^оwp>S4/Rg%$^Ԧu]畓|n񜽜=X]J cYb5;6S44z(`vS-LFx!ZHhB%O/KS; _68+<ĞcSsPٖcp59=luv@ s. bՍ5fpIߘE"Jw 8&{Ì,m d䘎;aUv~r;!hJI3T+g+^ "Л?m1ڴza ^mna7z4@f'&!) ylVd3v&6~(N2_փ/m[;SKq^ { dy7$J;3&g 򈵇`kۍ-g8c# >Xt*+rT֤Yy%['gr*HYBdK[[ L)/bf.3NOl3,5rۡwVPhؒm5__tvWn [-ۛ}o&gIYĚIYTkO<ǟISo{3 ?: (٧f @et̓YQP2k"Q9gRhz{}\||8O B}6hOjL֯3 a*)oJu$1wZs%r*<)Y3iMN}ƱT(SwB@Fw1ޛ+WI,9MdOЯ}U< +BKwo:hFBiq \С`KQSl nU|GrKX$k:Ez5yA^8+8<椐5;#HOa@*["e"q RP'IbK9 *w$vAsOUTeXQx&c/ty@h=% b2o^9\ZD]hOK>+@ŀ( V3@n9hXr`h  Z &f=iWnW@* Oa%pɳnt]I$eI=h|74ԻUzgoV;ކAH_E+8e{Fka.Gnhmf sZ1JWL).|pY#Fv.w6A*ER,DQY](MwִaIYHɩ"IZt )#|m%NR7 ɭ Z;8: Z`JPt RCmuc<)T4C6<[th<:ddfzy6kŚ䏈)b8_% Yq\p g׻P JԁxBqI) xcD&v Zu=\?C iLtoMՏ7~HOw+Sڌe3>RB;:|noz3.~'-; є釙AƃoT"iܯ*]?^ԿuzpLu4Z|MX!nsX}ˆݨs{ zzvO͹Ѳ|.ۑ'd66ׄSpDid+xiFpnC WoRVdkgcdd<ڥ3sٶwq0 T^.(p\˻|(fҍnflr3Jhg,[nk/\l` nH!67yJ=?0: Hjkc5ZyFfJ'FtV+8"uX0ȈpMNɷ T T_ 6K>άB;T5"XuMsE9AF5l.5?7gZhy:t3eno:M05 8ݵ^Ay.5;,mt4rJIpD?z6/9bW`F6r6/|xMh*e2_.]'/2(`\:xBx` <]T*q fAS{?|py1VQo4>u&3dt?)tI^݌FofFNm[ofh8_ٸ̎|ZΑnۊLd3An{Mblc$j<4:d Zk`wEs+XE=XTbb Q:U1U kY@;HY5^.lYmLP,@+NTaf49 ,.X->잤AF."D >Q{'R+<13JR0F)a8X\t,k$%׽aH>#$mbjϖ*z$MWppN 9/U`rkl+'Lnj8@'xQ pz9LTiArhV\feZIy NL+ؾ"ޒVh X_"6L 8$cI탖Mk(ίZ#@o].v9mžcKQeֈuVVE|Y.6M}&}Mw':ht7yHt+؞v+G*﷛EäT_5W]ޣ~̦>0k; A}%l¾e?sy3ftK&'+a)s{9~?˳36A*ER,DQYR2'_O\ט,Jyb!4F岆*0NJɈhu! 2P1ze*cs9DA Dcې6 r0n^!Dfg'#xBK8rG:%;90+_7Ɉ5)Q*!kk*'iN2&EO8om$+ki?W: 2I彐 b % 85o(m^N* $A0)О$I IX1t&jHIRNOZO7%oj2)e !F&o=N4$lΛ(k$b 4#jWv-'11]-Z눇BU.$^BU.$^o#^Q1H3p15cԌW@5tXjT/ә}=4pl/_$h=K[Mml?vAtN դ+R.69|o^irZ>~^/ W>.O~ݧg˅fs톣7Ugn'nN\1 p Sm'|T\~%rFv$b8 ul|FT.(H coTX:~>)vA\Je{J]9]PbO7/&g5[W- Lz)o2'yu>a^pٞar\0mqi;~ɬ)f1{L*-D]3 +ZNlOEZ~ؗR¼^ Z1Qs[[1M a2"C[]|_7֮Vx5)fME_ 'nCMk9|2lZ)"QjBovD9 'TmhX̥#|-J\+rIeKԗᡤ螦"J־d<\MmARHrZW_<ҵ$t`rKՀ{BQ6{@Jۓ2]8+DH:k$CBZf/uaaNv܀g:z_=I*8jwWN`T5G(/|xM)~~|^r\%zl  [y\ުQtN qIc$g%>8օy57`^O/ =MuI{,O^݌F[of\MmҨoe Nbd;@đ˰\,!1&Jr^1oFnd)K3S2VL@B 'iYzZxU rUhKRN },4 ;,v`K*(Q3䧃B9s/g%R9ЂBv>l!2ǹ\8r92|<Zj0y[+4slJcmE.xAڔP{Bksׁ:9^4hf 6 fn@F媭˞ hXlboD8zo2yR_ЊV<`pVw _#PZ4Ş/n^)^ e,حo(-_Zn/k_|˗u?e}Z/N:Q/ пS:". 
n)(As%l\9b'e\g/l1NϜ@i zZqy[!r֙ȍg,o%C}dB Ku2J!iPmI`*fSC#hd[4^ԤIYU6}A~s[^k[։Ȏۗ}6yۈv6ᰑvpN",ě|>w׏Nҗ»϶afj)7.=H he 74J7"ȄjBڑ%n7V9Uοfo[5옱ډ{rCy*X5ժ @V\vڙ؆fDsHmzBNI&trx~l$- -Qy։l!<%( Udw**_z3SL` 63A¢G wٻ6n-x@G&y/P.M>kpƭb6]#i${"$=^5H5Lʰ sbdalGNI[$k=1c!(Qy]UyVFpQDrړB嶒V 4K&F aT^$^UyҌy.;ip.']FG$;xg%=Ѳ]- <y%s4ȜGZv9M>y2O@ne>ݾY}^a8Zc9rRdYer:FJ>s\b!:X8t2^Vug":Ug`kKa MAJd]uFz3()u<K994 mdavcpItݱ;+{U$y,6қmY؝j${VŽ)40($EvY=|N*pHcv\Fy5C#rTn |Z e:}\ZHج">Dlh ͞nC39L24۩pGˆf`G2F2B  QHhcm7˱щF\%Lm( ț08R=Ou ;ՓH[[a9ͨ)U7mA6?+aןޤbMq 8'ag(9Mb!S\V]|.m$rVqs&@`:P,+_ QLN#\ `2&=L.T CPڂD Cq_kC*VUP8BU-/ch PPA ߾2x6q4vOŜLXC;6&5hNVh{#^kIXҪ$X酊PPE)+.|*\yd=ZP0"fô^撫haZXOїstL_Ue2JnB SRՠX xDPYұϭ)O=Tnrp5 gan7rxc9e-ե<+&P=|q;n6_n>)loh@_\=Z6=3ѻofyм cO󯾿}}}5myj:[(o'f[Y4FBj3_}sޭ qm#Crii7oD{KW{7 Hn&GG@2׆ :`3|γ9GG3[WZ87P:92ȑvC.ev"8&TYح!5˥8HY}MHo()SvR#.pBs LZJ]v[/%Tv":&jU]a,YYeaޜSvmNiKyNivO0#aZGfayW5YحdT8N8eD. fˆ.5svK C|}\vYmg>VXWQvk,M;-Y6SO~1D*T4ɹu]M%NyJ* FpFֹÍ'wKnmL->$&fE@,MZס(cd+Pq.DhB@ XՌ/65Xf(J< C>T5eTΊd~ ҈`%_,TؚQURZK&Y4V [U +8xvX:ZUZD@Ot2̭f}{>0P2~lCsyO.)5/C M GmʅgbgWlpUI^4_y}q^NEE]Ο7{=GIk\]42~K^lU7g_p%j敖G쉚jՓzsV -T҂v'AE&KNGt>2vrng;iVdochYNp,VǦ9s;ЙjT+doձ/f˥E ~눛Ǔ>ֺ֖mWF9Y#wYV#ȼYԥ5:icʈ-SL s͛`f!YmڥѢμYhsxz$)WyMRcUTݥFBiO77xpL;LQ0nQG:,Dؑq"OrLpBsXnQe]w꺎Jf<.16v(OJe]w~{2PJNIo$5Ns?_Z݋vţǏ)e={uy)XI;ci^9D}`pdtWݎ֯iV]_CXRv5M>!s a+6U %.)UPtm/\mk #/`aPmmh֌J::QufRaN3FӛiL\ɍ’ZVۂG E|\2B+'26i%eR3=7iPãzgp^k`h37Njc1w ss2Rsp3_JRs1vi9K PJ-s6 Sh]9Z{\4mч-ܣ~d |*a:i,LZkz9RsvcڪrE˙6WHbv,T CPڂD ָ/YϷ[TRQ0O]AO:P|Zz]c9NO{nB1c,IOO}{/Yr{?psyOVkO_}xmt b4WW~rW3NcIRru坚kR:`'*F5DdLNR$fzsJ}'$wFJFN x>@cBNw!׷Nj/X{N%6J&׆/9[;@iqF&ۮx=o/6gG_:_]6G|b?u\LH-qz.u qR Or>K0~ %A1q>#~忟~lnFUH{Kp[0< \%J5VLL?J==`Fr,bAoCAF)J.%lfLg#9je  ڒ\>ʪVFU OtUvqi~ϖYB:wIއS]@8=}nݽm,5?" **%yb,+T{ PU!AQ$Yu}X.$\ q}xU_もٚ )P43WGo,@ t(-5ǑkɬդA!H#b%s]QpE^*?j-ʪ$)f#=a^ pT]LX}&T]Pj&Y%.&F} C< 4Hҡa"#"o zSx(I cj x^XK#kb6O$$fLo7 Gr9aͩ&.r!' \qRwi|tc齷՛w?~/&Ww3Ag׫۔+h}{<)g FmKnd3aG3R_qkjr2^5v΁(+nd,t9&HԞunmHՕ$?OnJ?iޏáE|'kMm67HX޵]Z&]_[a8|2`1}q5iڹTtdOlb{blV[2=Rp;VA`_SO{8V)_|5k6 E%)DM Ur,]QA0!X(hZv(g0U*QB)C4 "r-##h9e.P;VY"ԕ*k_:<.YGīېs`hEl~] /L0ZD6JZ8 mʂ@@:T`릃B۵{}ljĂ7kVvioMan{\l#tu[i o}zi-YM36E:eHG\O}[jƸ4/Sz5j>}ҧO,䚜TX:FjVK.P1HJ$UY}D&5 c販ߝ fJR>O%mؒ+sUB=RHvdh2KY$]]_̘UwO&7E}Mu<+u,cZ2-\3h69*)n?)#3J2e??j>\-XNGk%@A}1gƻb砞_3ʽ߭'?~!vҼm]zbEVޕ4";! 36 ѴȒd&u:(ʲӘc]OuuUu,Ed~aW;_UA6Dl-VvvwVܹkWK ~uMܽwWcWbئY{݊*fXT#Ʀz~UFgO)l 1۩0Ca(°Lj80FQpCXt9 =.%ՋqSY rxaKi,R^XD$p¢':dqJBCčBJJDh*Zm?9F gnP;#p+ډ[ɏ;^Yy>PTA;\6Dڰ AQ HPA!Jd\|$Fvd#36=VD+\X˷KUۗȌ9;b$^*{-mXnUU);q9ݼbU0SRSmX!8%PmDk"ae&g}8J_mZ [cFiRDM QOlz$,RK0O5! H~H9LmnB[Ɖvw-B<0i=0L9N'\m(FSM'5d#sBf淛nsͅ8k^((_g,|lxRS1p5K#`r %DGҨ`ӛKcup|/#XYaY=TSɚ5pHpï+$Q;.1-FJRdŽ0q@*L8% WIP*5"ɗ?B'8% %R$F`;X3u(cp.c,f8r%wɕI7NG'N/'jOz;v4a(F AaiILwyaDˋ()¨: 0:^Tʩjn}7D'~3vK:vǒ[܆;`՞~DJ]\ޗ W]'tZ/7Bzߵ. :XWeNS|ǔ?|Xn#{3~qLm5 \fƅ_;Zx/ Lmxsstr>I( 4Kv7?^E&ĥ<'OiDuqnbJň B1mmD9`g_q8VTY{黳}ueEj+6an dVfOEF*gD8C$Kyš%h*\_9{%J .O_p tJ^Щ\uΔPd?NBA(j Ƃ4Pē~#%/6R. ;!LR0 M"QՌrnTTJg{lqa.XcFޟyfA Fw!T KgFc3}VثM-ZuZ_niFAԱ'&LP@Ddޖ5~~!\[\Z heQ`oF@STiW_ckMsmK]r`PXʞYO: S^e=7`Y9yZ:tx4u 8dmԚ_LFfՓɿ} &g}Ϣi7dcM?7p~ftTnԙ}͌}ٽ{ ˿h^ؽ/^/~FwnJqQʟW޷NF8vQ8~{&"wtg_@7&F3{L:X¢,qOiT֠4D7:YO2x.xg"󫳭c~崆UɮMss2-4W̯n%teg0&So篧/_~Yκ7 j/`~ۿjh1*fu:zmT oq)6~>g<Ug8iga~;/Ρ? BieTL:\ _+ç +_qv LX87 AB6_ʰW V~ہb kݎ& ,s̨%U^H,}&~I ykAF%&>_XDbA -%d*B*HZkn>3V+ v'MckBiu+]Ԗ6()8;Zv}E&N'#>~G ;<\K߆þևeM|'y;H:1X9wxUSWu[*ui^'i_]P=8UHh| Hb!Wq Y^ՇUe aڊ2U/w WYR~V"gsL;@?7.;@t,(k\"Q" Xq! 
FE&Ltnެ`!}~Mts$.\taEh_zdΫw%.+S{xWⶮDe o *4?)TT M!o ySh S(K./o ySțBULLr)d}sa"Xc` xy[` ȣK ҆I,k;~ô$7Laa7>p0fu2\gjJc)SHadҐE s ƤT~w9tI~ń$1aVTLrKy-kFR|^ZdR|zRӐ&2A^Adxp R!7>.R1J6 yGAs ,m赒6yVBRs=I;F|H2=kX6G@C}ݬ"LL)؛z 57 liϭ0\m66\0Ơ#l3ЅU')+'5SsRٻ8$W,3ʌ Ga_jRRgS @;)LĒ"r.@1d22^{ZPFh ۓ: T9rG b"]G6Yca1c0D hq %Kt1IAlLȵETIM&.(Չs 9 K^9q:--[_.CRE~]/,>E=>??w|XfH+wݷ޵X2{W;`ƛGw O(~,_?,BJx3}؇\UE`,q1,pcmKbHk Ϯ>H4#WջE~wpNabw*f[tdaY% ); i7/xѦfYrMLz{fHpjIxʍ\4Tu7fy@ ׺3𪇐~E\yPxD0*"O.],H#!()a3п3ҶJ0EāU\)=$4F]JdPv'VjIk HZ r$ܔ\1fX !10:9HÕԀyv;Ѣ`FuV֪}u#T݈ bڗi埯?%w)+K)k&2W| rn?]u| T8s>Ԓ5ianZAՈUFO8{<^{u3.}γ>Oލe?0QxY?̵DQ½WWBg$~`_ET>WeNHpFׂ@1O |LR_^ips2 ӆMd-/2Z:dmBi[hK>A)@8 V즣͐QԶ&X1zȿeRuI^ʨd-^2;di' wl7f2tm*U -)92sL;rpV{j6xR3}W$]^X~eلfY]h.*Kq\,wfsR>b {&vD<֒`! yEm~)\Di4ѥno^? sY|Yw>֞Ȟ#WClBA-AISs\|II3I`f*+6fEC[蚦gbe]*?pxFۯ:jB0w + ˕8BC6￞wDհI3V=5h=rspU`oUܺBfj{_1;~eo_vV=食}}2<\p]};Sq2&CQa4Wv:a'-DS55 + 8[b<[=aTl5Zx*Ѷb/AG_yqX' 2 $0>iAs*YԾA_rx1E%hBR0*?"?!l&? uOkrM%_Nc YTjhr?ëJ /󍘷14#[k&:pG,aZZ*}yO} 47V1c==zlUu\,t/]Sj(̑/{,ה;|^Zr\=a4 <\.WԹ1r3$+1uJBƐW- [e_u\&wMzE/<hWY~|ww¿_~mUy͸hM\:1Buo6FjV1r4 -U Z`P&>g`r Z.ݏW IleXHb'WH2 IOۗqB ]>U!AERȻW=Ad j7YT m}T"Sxk&_rAW(J(tGZHwجн(1x\B N2fl,C`eGt."%̆dT=cHm10 Q[aJ0}YU[kg99γ5`PȳpfB`,eؤM7^0W-藄WfS xC{ 4#t܆j KzFs1I*%."GI~lF߄_P'}0`Dlԫ`^[0`6N85^C匮l l->Ъ$0LG'm^ s'g9%P_=V- &;;jް+T9 ǜOyx~18sz9b"6r^(t8Wңx6r5*Ƣ2l_9 p} _ gS?sհlݘpZ +*a hw3p/0}뉁3pezA̛x+pj>X"WtPvt$G'{Pm`Goi7w5:{S 5>wsJ?V0lߗ=^+‰hhb<$oșu*qQʙm@.@nt@~Nx"ʤ6c92Uk Ч\/ %[$-Ek0Yl$Dh@a"ᳬ1 l7e:QMk޻ђ˥<#nDeTA_wM )i",NZ * :c̪ct1ZHfcʎ A[b9́(1 $~rRznQ*:Jk Ġ1Q*-T f3(VG(n5O]1J%y (36.[8tPuH`\1#2I^/f㍇ȸIDg<*.-Vhge}V<ɓ6O)Lʖتka8_k\Ikhs+r|ҌsWVr&/dVޕui>K:I9UC I(Xa,}./AUQUz2-I~L-wq~}5v 0hhwRyh#iqMl!fS̈h<91%RD3+j_q1^]6mSKwpLJ!3WEV@*v [HҚu,=$5n6I"8)$xJ&%Ikα4kC@cgSJ:b i7w"B'7nG5ڙ 7xlM[UAwswJHi{EE4}+cR<(3䲱XΙ<ȀYz0MRDC-6 vY2X:pc8$Lg!#Aq&8rXAUf^suzwsy,)#n mIe0pV޵qd׿BA`[u xmxWc?d0i3Kܙv{N5)jhi1,ru]]գҧoZmBǦB]2; }nPrzrx\xX[j%mRJY4Tb5Yۘmq-^|u e3$5׽?F\dodp{RТkbdQF+LH, ZӤ0Z};'scYIe4ȷ9hZRʥ55$OQ*52魀M{I Udzz O(q4n<^ E-ǎڃGs=PFɓI PZ O*@WFAr( yYAE]:(?z<67گ>6WOO_&o(4BG~#habPѓPkis1JuH|nmi<( ƅmv6ܾ:.copU*f0~[}p8VgOZȮ&nb%7h $a";MXBaX=p;pnl7YPѤViyL !z)1~+7 Lޮ_]|1;8U/lc6tp`TÆP7v4o7^K+ ݪ?Q0VO7Db4[i+So>$વu\p2Nx\~qalC~㘎j/2A/ovb["G^MYiy>/ϷH/|SqvIfyW;;7׽vR7o/v?~OucP׵6;mmu^s}߇ef73|>nw_|:_״jx4ݯQ5_@U~ u5vǻ;/ YWMG7GYK}}pO^뛯\lr6 E0VM|;jal!r"1*X~la!A~ӓc3$2|XƗ3\]$,i%^orΊtnH'9]hC'hwK?.fχ!ZGqY޷dl \|1L=& )14s>`cx˿H'ĿutQ={4YÝV=l`c i]D㴛 ZV}~pp\Mpbkw](,ꌯ^8WDL"2MOr+"sMC4q.F^"/x>"{'>Y:{8׿ޑWn>1W׽뮑2>'ǘ,rf@d`;OX!Cw>Xq֘텕sa}VX$m9g˦(eSOMKӖlٔM=2D }#Lq|~B2zfo'VV|j[ҕv=R:+><+ƗCz:-g ӿs^»;;<::s7҇j㏟͆/j_1#gӝ}O÷oNͰ)>)}8UcV>@&̤Pfi1JĔhD.vFJ86'\h~3fyngl0X{+־?嗸솟v㢙uK;sT(+Qoc.*JpQ>s}px}? yqNjvFn66Sj&)HII*!v$c&5NCNMlNHYs6WWi#Iث5x1K b5jY1aLqU)=r3,v6DkCdyZ.-즴4((-JP@LL&S G(72Pl)M'F{Y 7٠gKH6r,zXsOt2d炗;:ZRRR(Vc|j8oE)!aZ Q㫤\70aMjE{ׯə+^pml$3LD[!UAUV8XߡDSb(wu&$S[6 ՖCW*@/LVlAIa70aI0PmC{̊{`t6e&jhq*VCL?i>GNXxt\r'`IPT!cJDjiX &EZF5T}!ų* Оd i NyO4m:4R@ƔXHf ȒBV0ikLקnjqhS'l-`o骑@X- o}JߨNM0+A dUvA@98nS29:KF#1Y~%M=nX&F*xԳ<; K@p8J@!O94R@ {%eAVcܓ.Rֱk ̠tP^Wy3,JG)Eh㍞XH.k ȁ(ܯx6=zFJZ=dDFaMy;XwnReɀAqDeիܩ 0!@ !wf؅3^>zXBJ4JFգ5- t"~m*|8Fu`q,}vG~i= fU4 Asc $84;Ǎ'53!/cȃZRKp=0:Ψ2oXZ0Ӆqc#F@vU@ IGPVhݚƛAq:qof,TɂNU~t}*u{;(x@5Yrf貚I;>n hr{c\ku'蓹hLbtB 4B4XDh;K1;>2bRmF| ZCpFL @b,CkKrA鷳:w,H r0@z-r Iٗ;f,w&NmDXj59Ӑ[b ̓7D# A_,!U2~ C 3)H̔Ȥ2 K7=DA@բc7ZP[!؝ !ٻ8W"@bKu[u $2݇DЯ':<=$/+pgĵ1,sw{wꮮB o&VPaV٥9\_K.o/JA׋刉20.rq3m2 3̭m̭{(7q‰J_k O/׵S+6k;7JDY8]B'm! 
cJ -̀-8C@cZ|[.o&7˶J~sa,BW5V*뫚-[1Ѣ-̓z?Aiio W A0WLJ DyO{u|@4# d;(87Mh}̴k87BI&Z0`z;lq-h<vݎ~q]3Mmm=El&VSpJ^S_59N3CSvEƩ .WHYAE$C ƣ[>[׿dEybyHڥݙ->i͙//:>`%XYN^J!/Mv]a暛z7nz4܃m6`--aѵ듳}_w}-1iȍDȫz0E˭^h_}\$;&,+OXk+NEmSY,2qi>/u-4]t2[.R;;[1lYyPħjCnΧj k#`;34Ղx&~@8'M,6D N='M,6H 䜧4q10#w!] p1ImZfv|:ZGК,XUPM;u'Vڭ w )0v?xI{c\2GyA =c焕^QDyspQDyOI,9/柿mv'uZ~kIϧ_N.ߗ&,]c'/N/oϧ,I#>iĝ0;[^,~Y7J}˭sc1l(1~8?k'%o{l6d)، {GV[e}t÷ݙ`^[EaL}6D o>_`ЄhstO7XӜc ;~bz][T&YtDk*?(iZDQt;b=x r<{F=x`K^Q$D]p+Bwd[`vyvN$Aǎ%[E)+ČNz3P] yY8/s ژz=`8^>:pS5"$u=\㱕{$#mfFuϢG?jojFͿ5)>k;%=9? F01t:O}2d/Wz0dFz ijs#]zOÎ59gv͖!0f̔yepFs]U8O>P#kO>'8"D'27s@PtfnvU_Y^+~"z۹A퓩VO/H$Yt)(V"Rt9*G l݁j"wtڎCѭSr*ɛXhyRgn|;^xg]/dJ%7_+ݧ=KaF:w@J?wj4 g01h"Mv6>h$~m E&޼ 36260 ZýNW.kcBk-&EFk7OZ8u,۴|! CL]x8 j7g &tJV-RަrTu_mG2Kr7$?^ؘؐvcS߶=HIX^W(c ?.-$ti |luoSˇY>~[YĢ]|//AjW] JIj!Td#PT5պ@6QP9+(bMgZaf͒;Ԟ/jVy#|h6_,,,DE29Y8,R)(JAf+Igg{׬~CB+3(!UɊP„r .*υu$\S#dY1dŨhЃjd(f}0Jfvh dX\#@4XW5*2Wx33tZ&ϑ8%')j i3;xarSӂ%pёKTEѓօ$=WR5S1xk^顪p?꼭 R}6)b:K9MaP/S(X%9#˩0-T1jDHBQ"*bVXXYGAZuRSفGd X[()U[fS- h6H{ӮUb`2xU &2<+&fn0EiDM@6Yf%$i YLWً7 U"KjTTO"spC-E^*x 'P.,`caCH$F, ^3 (k(8XX 4籸#Vו}!N^m" GX,jۿ"f [*+'NJavoUrfڎEt&F@I|:KvHldb9m''\jS!OL۫SZd͆02Һ3 4KS12+zpc" Qv 1Ao'Vj/(z# ^X*N ?P~@1\No'Y$`ɐ@MvgW k!9hCCvIVn_E8 `pܗd@8=2 @PʐC,ԃ1hm9 ZDUr@0A S$: 6bEIڌ!HHPZq6H) ^AIy^цAuuHfx+d*X`AH"vQ@DBIRdbbFPqArszBo} /:-n>BNߦ}HT t/jJA?z=>bK;>3TEz8L$`7$mO C5Є/rx2=k6#rHƓs7#XkuϹ gXF<Ƀxeꗍc>rw +g̐mܸͫb1Yo/ -,O4ׯm%xWu-21NjT4ҥ9ȗM`P4E1Y-&WpœuZHg D,,GEq4tiwݶk;Ɩkq]I:E%2գɤ^7 Ƌ<5p'SޒrTY2)dI*n-w݉XN fǓa%G/i"+d2k8<X-,ӳrN[ <~ma1sA-=Z{"=P%٘72LulZ8`Oe5RQNLZY8"Me_5 ]8:# D̳P/>˧Z5up7\lz7atuݛ=Vx0V- `ƪ'ؤv0#EhSFԍ&erï'=M 0΄=LZ0 {ؙ; ۄ@,0MSƼ˞!CVeR[*#QFjb#Qq$zG>NۃH(aD &ϙZbqmzHAQ֞s 1&90&M5ZmЗ ZDTL6k-Ӆs QZse\(猲ZgEf"˛=r~ kcQVv}yu4F CZ|DYsBZ5B B磀ZB#Ά)B#F:tB|[&|ɰvNrnfZ9D*w@ZKx|'iT >Ze-AIl%BsfKG-rTp<ْ&RfKG-A .e3SM[=ZՆ=콱wل3:=xMs3I2c^:R0cB #Zu4Gh! |kl-sTPqerY[82G9-sQC+n[gup![gu<ΊRL묎YQ*h;p:RNT˶yYq:?N; JY<(WX dr=p9-ix^ϣiW[)LL 1܇|!m၃aHja#ִ>FS^B0TuՁjBRց\%:С@bes-(&OTIWCu{Fľ1iUIbG om|vsGVt s5"SJ\9Řx҄T ҫosa(kd!n{ RAuVSxR|ݮb٧zZRǓ4R; dRAYqBZU x\" ">N>#D fS:陏|~|XCION4F0]ra|JڊX4B> xMxBj~JR|(Cx }`YО+0iڞzXӭy36D"{V+=MR:'vR' $hQ\DqB&D ]iV6;,1~|KQeʁ`2n|O&>xRQ}`?1"a']FlvRawDIDE2iZK K6geՈ:6,;xRM(FJda~7Wv$5ኜ]zE06(KB,TRQ1.$˲T|GliAyڄ+ug]J}SE J[FT֓b:>7`L;,پ+5iƞx׶J]R6InV}'i4W~W.,]wӿLa:};&ʾffa\9.r pk.n'{>8.S;t^3qσYg>RΏ7E\g)Fb<ͦ?[>g!.fUx^F5Awj߭5Mns>} 39C7,iaetMMmXd%~viZ~6]X|4"+Pr.^?hht򷾚)T.?6ҳ՗<-awnvϫ4 Ϝ1 %Ѕ$,t6/V_w`ipuQk[n_z֫=>j}˟7=`uMxfg$tp>|xVyJm*gxB6T82XSfw&)$e-{Y)zÕ0dmXs.{ϢS vMҾu֐ܐnfzYSU[Ӿ֚ښi IV뱄N4%>8ڼ.&\ɪy#K sѸyuMX]=I h ε=ŞBQ~CQo8]FwtY7Y*N; ..J}YY]oS n~U(F]v] Uװ >h)qx>{e_~)c=HE{L `lI5F*Q/~oC>tAϹgpŜ:$0Ix95 ZͧA٭ )=%R +e6pH_z+f>kLLؒ EђP1VvCxTnvKE/P=Bgz2f`#qI8d!gc{"AZ4f6{׉3|FfCx}s@/ܣIՖ0r+5? $i-5Hf l%)duњm[Sq5gWܱ֫VK%ۓđ=p|Ct6 `Xro<ޞ)bgS5-E{@ Tt_v ":6ޟWBҜ`hY D[qy3Ȅ/ƶs+q7,8v]˛P3 H/ΜXG:«0g$$e=ζM!S,wn B?1 l@HRTx>"X?C\ ҇=A/9 K0pt;NqQl_. 
ub!h5 l2.%^ö^esq1]׮Drك]ϧy~Z-Rs_miرY{4łukWeA(>D`O9^Ĵ,5X۲&z9BLQG8{wY6 ٲJpY4cS)JYerFԇ^YڃU)<^lJԿΆ*ϫ$a?{0ȫu=__ $9I&2PH ~3ën}"_1JՇSyNXF1v)y`_]u1]D۬VOQDj&)U]Y(W\K><7[-^'!&|-_ uVMׇ=M91#2jtM)ݕHQWŻ ՃٝwԭJ*KXWm_܆/qH܂vI~Y@S*Υ.thKau 7JZk{Ċk" -3S>I&ޝMV' cD[ lF}tП(22~rBgsŻwSb?GO3KUzuRpJqhyk ~Lh0EcGpv#=$8×$MSC|_f*)'˥x fV IF}rs1n[bK_=3}NgY2V]>n?àkZ-m#;CVp6XE blfV$.i@N"~udeH{0Xg2Ympr@|Őrw;$ TElH8 RSAx`n[l~JrIo15.sCGfɪu4uXR n668|NTu2A$Q%I3FF~yfP ~;ǃPPwwO}.cQ6 ß/RX2%<18VRM"31 '|&wp7ZOl6j!76Z5@؈-zdd_35JS^JsE?př'z%{`X Ixno[)BT?kؐx9]>|mk#K޿"RQUkOmF RH5A׸7_Z4B!nZ./EFo_VYhoNfBGӃӃ9[k֧> )>U8?U YeK{p}`HL&Lu,)1CDkʉ &,_mB0Rۿ) RsDp0 mBhU/8c% ob)Kf?=i:~$2E~S1>zQ#)/&Z\szdH# x3NzEi9Q JeN÷8;5X&|'4S(;QJ4;ΰ6$Xƌ7Ƃ#))~3;J#EfqzV1ʳ`^F-AJD4%q,y'(D{RӑGad7j=ƓAՄ8.rبʹ{kѦVu<=66Z#&zntrNZ4ux{r(%^7J2VRV?K߃zG˰ОJP/T ׇƅ3ivuEN:L݇ncL`77P0_Ӣe\2T7gROO8P1ƈ1NBm(ᓱTc4&q؄Lc."!"R$)bc0H Ux+y7RGJh5\A&MԤ@z. 7 bdGsw?#=C3{[)4AS~ʬ>j%tG-fR^ᤷHPRjÙ>!H.%Z,'.k G"EEĹݿ`'wVoCOPd%fo3NG?Mvdu[B*4tDOW~}zs~lB1q>~/5K{">-c-&SI*=v`B"jWn)j-$[7awl\D35GAٷ >S r Z>='6O|- gUn:t8̆`V-wO̓SkyϰFܴk/ ; bʐMEaP@D'&=i 4ܪAo7W/H؋͞ YL' îrZzj:+bi<^n; "ӏyƣo jA irVvizospLc\vC%QT jBY+{[cDH!}t?cC_*3TMvi`/&nztTwmMn'g3_\5)9u6'u} $sqlHD@ iR3tF`i|gWח6wp'%ş_}XA[e|v79EĨ"!8IJLZ (xw0݃~9bQuB7##7/#rnmaխ/> V# c9ֈokCV$]Oz'8`:)`I ZasYVONv0eK5L] t~|S D3 6'M .d;ap(0ґy +ADںw)f\*D$ba sRRƙjx+1+p9LT.0a4h0RQAB`oV{ x@b}zN8qYv+7[\^cf_}2 Ks'ln/̇ebCt1=/ϭz?ߕp?+r}ƗwCbk&ڧ@u cxtRz<~w{lN zb>T}ߨم56+baYi [(ɯf|јjQ< Gk%;U0c{g?t8okU@tb&q=MT}z`ּcSW ;j<JQBV+&&Ǩ0m_$dfJe;('zS'!%m={yMٌ8<}WҺJ'UDφĖ~26yO`̀j7*"?A8**5P#'gxiT[F@ucHjM);QMk|<RuTV^Xvı8jX(3$* :(,0ږY Ħ`\o,4Y&QNh#Fz6դkEN[`י-su-5̓ZV땿%}y~富0#ZBf.2.^ Vm+X<*lNjo7VZJV}ԋUQ@+/уB8f&#Fl:G GK'=΃`#b#!XwLKp;;ͧh]OVumw|wؚ?[dhH|Bl ;kwۧ+EB{Y؆@of jEeb th#Gwb|Ip}\/ex7q7.Ʋã4 Z .q$[8@:qVO~.mF0nJ'bk^cԝ D𣞳VOEۧ1LGN"xC B @4!ΝݏQ `|fGooLQ~٨A88sz gru\RQ&ngj+zg(!z״ځ{bӱ#I/XMC (Fv8㡅 rCk̿w3y4e@4rQq 8+H._>/.)22<Ru8Q]]i ?*@Я-A 4 A:괲wd.,@mu#p9e! fNg_Ӈ =uAs.T!(i*Ta.h^BILBJ1 4d)b`N,&.52܍&c HC/*LHDXZhqƈRB -1!H(Ⓦ(2fķم2ꎠ d5ddO8US &{oܮgD}TeEy8|*]1]Mbc'r8 ͹r5]`dLD%c:2#Hlw>p*S䢳rG&E0nNR%ZSD'8xjX ֊e<A'ZdxJ~D쀘#'3Ҏ4{Rn2{XINjMs2H$" ͜o@?KA1ƳRFm8n<@ұ(2 N}RoU~ ]RdC6*"Au#JB'N`TC4h k{;] C ӝL 3O0#ڰN-I`[xw^'t !dhNC bɵD 2䜬,@B``XNuh0a80H'- 29yB.EX;buG-:y#ʛO<`)$GV)H ܱ-K.oչoTGɖo'>pPk &Ԯ>jUl]O~X3ӅkMy|&V(KJՏ53:(;?H̏få5.pi͆˺ٰ|]ݙYj+H*eYJP2M H/Z<4#ƚd/K6klvd#lfYR.>ǫ{A7= ZӲ%muy*V*n 4GZTi*r;vR|5b煉M7e4^ލtʞ*ᘉ!l6 .< `(1fH kE8,2B1$S,'@JhxL|=*GJ0n^S/DXUF_=~>=0/_Ұ~9,қ)2'd4+0Mrj 3c.1 /S&(Կs?,ȹ~n>ޚf!bۮn_GgaJ!VL+;'w> y1FXT!nCv6i @G$ ibKlly'PbA=3̑|4jOO0WB.A3CifՋssE (&aϣ؍qm|\}y^ix` œɈJP9t A ӻݼwa`$^>"nƻ_0U">:•Jјo,8w2NG7G05lICxˎq"{3ƀIW91L {Rdw^?] C?ՍzE"rlhqIR7].Rn%TNT(ZvNж !MhEm8(Xç]Gos~Wn,Hcޒ2w,j[1X`x  ؑkN]rFhFCvO~thS{+aL(nۉb-OaV0OuQl"d=`狴N"؟V 9cdYsbqR6alL8Í iz ׅZX7gMua#&ct#<๐Pao}#}CgDTm7ݼ7g ht]&ceY3ǰEb}販 )J6qS BXSHw+C7I+p\W}zM5`@eaWj7Y<\/2Dw IYK1YB,^c2#G|4P<&s8gCHJL8 53s(@ǘ;C1M GbX]ES|UeE\r^e7gOh-G?\ Voagf(n,nDQW_蛪_7ޚcq:_}1nxZZ@՘ R#>Za N;ֱ`{ Gp2'E-AΕ^ߏS-? [i;kk9 2'˒yZ+Mv3($#BL\SF@awCIjr*IGvscJO/oXb.׎d"qN#`NO=뀛޹`BYRBp7c#7^ة' NAvwYz%P xmc|<:@O8FReK(r58dm5$5#h&N2\DQ07;K{MkP\8j7b9%e(!i$+aRpصGb!2Z}LHb=~;:_w;^>G"*L' TF$W.(vo.|DHA7e՗bo'8ߎ1,Gm:b9zHs^;8r$1=HBъ䏡KM`$La'3"v:/ǢN,Puxp4%x:GP{ƒӡO)d<܂'A;$ 2[xw^Ld m0 l rr!BIX-%q0Ev:ZO00  Gߋ%HA2ydNXFOh FqZ9˄{f֩-Op?*m ^>8ɯ:O.Uv8Z9_C<Ƅ&-M)]O<S%.}X| Zk^*Ĝ"4TH >ClIKrvR9m_ R ($"FL):TB tP:ӜBSA$nb䈨"virAӔ*k 3M"$7 Z(<%fd1%,5Xl LgjV|,{1w!LOO޵q#E/l|? 
̇, ;~À͇-Db#53-5b?Q 쌚jW,|蹶fʮ~уzNr+Kx=˂ i7+;[nۛɟl~SLx->[5>RO27ʳ/?~:έ%vۂӝgD>Ο9r{YJԬ)T ]oYX#v3d1X)5(V7O3g~y{b 3G;v?ffuN79noS?^?:a[.;[~,}`E҆<}{r`J1o&];XN5/ WSU[-TasR#TzO);C3+)\}"2Eyj=zȝKәq&+iVn^y.<)9|}_GpBf^bA+2Uw‰s1}|37*"[ѡcdK@FhvQJ6QQ7xgb9jɯ%v⻙uh?AuZIavS>=(p$BvR&Wڳ o]PJ/c;[U=v-2ocOBfsN)k+(IX>6ϷaxIx$* z⮵i~~"AV$~ԑؚgʜIXO3gzL`pw,'M^grJztycrK06ZW *2_28 H9TI I~]t ;W\{rAe./)9 u+X 6:pZڴ~@84&:ER7~(|=zAM~ӝ{tEcSLr͔G TMOziKL2 .(2پ@fWZ.0C*WkZab?L[jSͰIV %&|!y zvznx}ێ^O>6&OnV!5{Y$lcgu獥V>(l_B"֛`mzm"ʦQwKѽZj<$n b}{_'_ڡ%Qg\Ron8BK5w"&dP"J 07ijC|95CRH21K  cYRcR3 Bє*HvP\G l-%Eo >sx!s )A~<IJD)Kq2Q^$ +B phv)Jy)R"E3C) REc,HȮ3{7Jjr+Jk.MVRw4RvV(ޝ n! ]J8J"Aa*#@ isQC:wߔk:@Ѵ[%MybS8#!Ei74P޴c'y܎ۙQ;<[s,'Rr|Ӫ$ WHF!NYj3t7Ba?ǧS%`C\##[6檉H?ho^xʊ7j&q^cJBSۄ{7POW^$^ثb]:"k~^͒ lvX&K3k9Zq쫉?b>FroCMP88648_ 2}8"δZ1 n.ѡEQFW<Q>!)[ߴdըy߾p1:i=sx>vtD m~&1YzSN*?C+MTV:,Ks@Z:P p.6wScJ28jkCmk3jNe0Q!9⛫T#KrDhBRa u)Yd&~b'JӰl$Ԛ+ ; *OL$`$NPQf Zi1jo @x2FC'Gx& ߠs6F1ql gXb}U={N{yjE87j34"+Z.uSGwѻ2hfI@zKa0  Xm(K)j尥FN[QS8Ӂג>f}tr\*QK꼐iUpIoQ8j\q !2b?e2h({SF5`p[7Ϣg&)^@I)nzc~6sI:I;J gƑTP6ڥ Plp~Sc'S)z"r"\\;z\]C<-8%cpxu48 K`߭x K/ѓgAiM""JP ܷ\?{7,xʛzlݡ*MGO? u]kLTH"ԭcE^,B] ;IZ=}F=H4ϋ'vl!U)iR՞' 3+ SoQU3NR%Z˖戜 EKp[<ՄBio 1a'cSnH "M F#97 Xf<}l{;vegQOK.ґUg;6 NgێIB.#VÀXE!FG!9Κ5D#ZGR= zc(;o0+0@>ȮpQ-\z$i G&/FP.RTScf ApZeAH&("-=˅jjFbRp0Ÿp xͪ&HjCLPieNP$H%lS $'du6fH 1\:5h^SҸbزe?gÔz_N.FPT!Xd' Gv)`0&p[Ʌ~AavCrZtLp#Qk kz+ACzdA%|ZZmڗY)kYqK O/X10fcuu \sI"+E :Rp)G$%́cdA *i'$` JP(ܦ8#W )3Zcߚ']$R]7RxOqEKD|O) fNB !8h u"-!LrWPBD ,ǎR qدj7RS^ٽ6ۛ1"eR5]owsمvZg g{ հm=1DeW%1==~`gxL/%&6gOon&nZgy~OaLx3Wm%Ꝍ Xd9ħ)XG3x5z)zאDA/'e-Voo퇹%P+ýٰ1K^ՋJCP.}J_nC6MQ-kh3^p}~;7WjWY mFA9ڹ%fem;s!4)1wG^{S|'s)iy;{͠3=RJTq"B4uBx#.bt "SJQyQG% c ј/ݧw<# ujBC {Qy]: LH2RVGs\׭aC`DEFr].ꎜZuR>W8%<)"u1HB0{W(=߂Z]E_Y@]%_0fq~ZSJc5iN 1CA.  ͗3zŗ%&X #%D:ԫKAprG|BŁ"zC84˼Jf%ےRJ/@gٷVqUupOSH'/+πULVfZq Cof>O:6r=:P) N@& WD7^$F+`H''z|2: q9ٻƍ$Wz!}(BO-ᘱ^P(#Z}1}IIPUL#a)U_fVVfVt s+?؉eOԎp?–gŧw;4_vֲuSKiھH>ŮB#= Xʶ!=pEsΠs/H!ĹJˮ nZ,\""LRuDPe"M$f t:CIcJ^Jtչ|Ӣ+3H+#”,4@PEZW.VB$؄ƖD,(J,74*V誸ѵt,cK Cql:!5JFD1'F$IbEh'$=uIzjWbzbT4́%H2b@8X FkeC0mFOnrvn5TX\iEU^0g!պxtEZ#̊8u:RԬZT#c;5p!Su3TUj37OarT`&9hfX!5-a)*3 )0'B Lc&ҕBB) FD;Tim ~)n_f(w|=4]}Ƚ0 ͜kZNpˋǧlҵ_QT k"Ri1p|R-Ju=C^4ZzC=N ׫pqq#F/s3Xq$eĂOxҙ1/OS:Ԩ6;x9m)yyP*Ok yz3jB~HK\.S~*DU) _UT$'B|xx2}iy4d4SZ7o`*~BLjV*RmjE "f4KD"QWVrrB}% ԏa!ke/;fLkFoG\w2!sE8ڡ-r9=s\%iװmƪ!ywβIo/l$CkN>ރc7> $ N.-sHf]^+@>AUajEAZUDCz)8t;Fv#xx'knd:[ƀٺ>!3 Ai UF4߲¯VT"N{K`t(O{[p5aBA,e6`~u ΙR(6E^FDYSf;} tÊrNQܡpY:_>[ ceX^m`?Al2ǩ^1'w|Y3:}pË>o.^pYJ\'8t(*fy]>׍CnWť,6Qz7"Pƙn֭bLq .TZj`1:C2OВ(|0`iJS,EvjDչ"KI`rͦCjZ"L@F?|a}H vfx{hb44''8 pz)Y88>+y㶘K4IOiwؗA^?uo^`y`9aJ4XY&B8 2 DLG_jާ\_NnyZ1ޮt!{j xb`SU:dƍ^9њQګFPbun /d$`2͓,cNH֡k!CFQ,)d1BvP%+} oXme=M `W94TEMh:w^*lڐ{|6 .'Hi@ɹ\Sq O,#bDI08T!AQ,P޳ު̞ XsU j>?Mo\vJ uG|& |>NWrv|1|p?J*<7UHyg+HC||ѭ(r陿OCR5 _0n#2]f*//% Lw˜z>~0˵n|"Jh{.JH0y 22\!2 J,,3Dax( L-q$?W$Gþ6Y $h\itFLuxR}QubB {<ygv탥@$-`8ݑY;H㞴 ҨeXƽm0m_v= Rݡ .hC\3  CfLsrzsoC~V$V>\]| i *Ui&W_k/WX% 3$UN}g>?[nuz6JKĄU"86JcNV:212EG[P")tFMY$BiMW-וWUSn8Ք+rpUF!F e;GKZZutmȭ Z'j(7-}f2e櫜r˹N${Aňfs(ye#c;]Y).SL(pr2} n\-\l iT"E ?=Q)3.Z'{dZZfض#%*5qL\I}Za8Ş;8ӔiwMpro'0̴` `;M*bs9RRF,4WaD,a "عG4H0c^\3_^\!C8k EY*l O;!0F(ɢl_a^{a$hR‘IZ%ƈ aw1" 7{VZq};U1^ֿwZK *pz>؇lv:-pr9=$_P Pـ.z84w܏ _ l 5 ٓ[9WnwzV%غ# S\(X~YC[J9JQLy0ewDJ/t.в$JQB( J+*ġѡA9Jr0ѡRh_@W57ބGx[0ɢh?h_$HžĵpSګnnX''靈niT^tnh{yS!:Ld&r'=Bx3{)q~Obi]]dєj6i~XW3T4Bh[e<=z0_®>T%,N=+N{'b=Ze^ȉqƏMCq n{c& kJK;s=zQd+v O*^Bho]&Gk2}EQ=[xp|l;Wr>ywβ(t${߷~ gJ7mD[tO58_.ҥ+v"G,, F0q)E IKY]%ِw☃ĀwA=[mAp̛n.TR]H|ih%* hJXxދ*l1An2'?Z' )5"cu( :M[AbЃh@Nh kuSI1'S*19&B cYsN7eBPЍydzmmL1CeB7%]mocNƫm':ԘxP*9az !B:6<}ziW fTj>qPda`k$#ј8C(?FD 
ߙu3^fo<|3kY/+n7\!)oLͩvWø2א~VHc+;X_"Uh8ofs:|U`ߟU/ܵvzM!M7᭷;A' Cng1u_}%)i۽+ sm01 ,LJWc >!TcQܶ~73քɡP3sK:nV,w&p 3\ lItV*J+%?UT gԽ j W6u:{̶fjnk6%TomW}w}vY_oy=uϮ79Y݈s{;0,l,a柟)810XdzO~I8O6s%`i`vG0h_h:w:ZS@XFXjd((),FQhihEJkCpE$ W+j|禎z.6jW}RKj>z^J#W U'tΐk~Ct1֒vBOrk^ObX}UI*znx=RX+&TI-ƉGtX5}1|>hZZQٞFf2>S~UZ/'b0Yd;$ !b8/WgG3IS540_}9K|}+1sෳC:ɾh:F6sKxnWt0r#efds>o|'O|vʘ4}s;3X9*2cv8o΢ x=iŃx}ϛ}|w=7DC/:<;)ʜ睸͏ts*j8}\)ʉw7my b}V>n?:B*H,%ĜX,6D$!p(ƱbID2RϫVHPAW@B_/lj˳M?vˠiC$hH4!`N_DVEQ$qtRVZ#$< Wi#3Eْ3gjxo#gs*Ir ~R?>7S ~~,2}\tY>\)۳ڙ+dLVǔ%)O} ) $4їر-;NJcOn/\pN1!_fao~L~dxٟFZICbfnXԀiJ,W+|D9'6lzw끣e?x^dz|W[$?G[.+*!*QF=8JU.x1FjO<4CF5FP.na}dXY9DC i:!i𱌆h$#2ͱ! v9WL&b-L2DȏDH/Q D1k[6Gw.m/9iJeGy(I?9Y/ztՂlnkG3s"Ǧ$/6pˆAa>&pK5Lȵ!{b&e>nĒ 6=[5~1^xȼl Z&Æ$gΕLRi\0 Trb 0e=;cuX !7s!&rtQQ0 4%E"\sfst$%6n G9lB5C69 ÆMPBZqR0e#nZ&`UV)PF&6Q5jD+,48viVJQ2vc&I K%p|>ةP$ad9޳YJjH8Q=U%!c*n詬6Jg--9 Ap̻Q1յΓsQ6B92‚( 8_ŌvpN3"L`4," &9&-iDlY߿yӠgwd۽u^>\ݚ;sA\g-i*18**n2w?[)ښgb::@'2,'Lϙˑ/PWR':ihᜫ =-J˂S4{̇X:ZZʜZ (%GmqKBE^ 3t|T RD7W)[D2CJX޺ˏL6^l lU)C`q5zÛ}5=txp[f^VOgqnv#q 3'~#NDZlc>^KVd驀TWtrMN4n8njE)'N-5<@^\(_ !bmEY13E߈8S<-W}EDbFD<ͭ_63D)ꇞd&桊)_ GBA2o{u ޷ $- gvW;[>ikhJsrKo JiK))б}gJxX_Gޤ=Z%ɔ~ 6Wh P|2?X3 X9G(G޽ {x*U̻ԔDCZ+ lC8*1J As)Ry+ǫ`[!m;r'z5Z1v2{:iqwwc=Ɍۮ7 y)Wöz@*I7"~DJ#gN>e ΩugݰOy8%SIqɧUd+@r(:GDrL-fjC Z/iN /I{K.2BPX 2 *Uy9WVnؗBzrQk`ޗ\jzZlZI%AppeSl) Ћ8`!Bzg[:.,[ A\)PDС 0k-(ᲵNZp0T?HfFۮ B0yN};zgPFzz=43맧]:Ҷ$iMo88h g6hSNoDx&qyo ^Rq0Q\%e6Fq{Q,[9*Y4I+T3P! pQULt\*AGbOezЩ]u∨fN*#!MHji6'F*~Lc:;q$)rdx,IBY2Hj7͵K$}d!︠d(%ΓD9,ԝ<>.< \J]zўoO#֭҂֭ޒk4uCjQKjB GIwdi΋5u4Q-XO-J߂i5s *Mjbo?].ez=܌7~/?>,W%d[gрUD7 _m|aS`K!\C翽>U|y"xyb{Ы/>sOpk[3GQ))B :)g\qhQ4` ]IQCy,aPP& >#g=WOׄ(E7XiawGӍ]px}PO0.|2;*A ھmKd{Wsj Op>})O7Sojȑqtk9J7۴cFшSCF]jjt'etR!5R N]筱;U\Gj&eEwUe$|\e%JaGdrb #DΪ5fݗ8v2$ eR-;A¦"R1'$.ySWx<w=TZ;|:LmXk<owVa-gŇjzUP)8i;xbW ɦpӫrwWob0e:M#apFoVv&ZI˜U wInԀe? +Ѭڔ8+g,Dn8Ha9HKR3yg-T|,SQ\P}&=J\Nu[w7ŝIl@S[hOuR%ƚ뭍Oo/_kERK,x;n}Js4”C)Eiscj5^ r zN;攧#4 E.ss>($>2ǐ94!$> ͘vͳ7tnNB9r: GOZG$ b>^pFe[V=ɒNkj% &ÿ$ Fuo=$;B~!D[hOI|b`HvΌU+iyҘ2zxGZ2QKm2EW;l/j"^aK#AzM8fQ6Bfqsyv.&b=E'^^HilDOyC>I~r(DT!E^W!fr%hJ™(bU3jGrdYպH+G5\k+4(҂((0U0Ÿk kyƴC 3_.O|iKR:SZHJy)-Dz*" ۧdvio9;}-").o4xjQ=DsqboʝI?C4>67I46!_9f@D;I78D\&2稕+ןIB :̕49WY4N /ݤȕAR"Rr3I{\\@sEO)$r`p\k(cy~MS ND>G3D->FPsBF2!RD 6@IRzE0tzQy-bz?2zΪI1BEu Ͻh)qT %N؛^TSm0:#AtzSf1VX fR 0@V-f9#[|0ݗ?auoB|~VV}oDr:0ZUT/+ s|ހ0!|C4% 1hRK,O_5LJRQkzUT 6+>L7Ie[+hխ3׫6@ zX4 k}b2vy ͇QBYT5 uoWmFnof>_.ހ' zz4@B;5feϡYQxlN?(p$QHN4(q,4Szѝ"d$I zm$4t D:O^Q|'JK ѻN5Cݫɝ"V $-]aCTQ@MR5R: wsR9hz7* m8Iٚs߿8%&ޮP^9,j01e#>l d/C>Tqxgg?uDά߽>Mmra/vd`ݹ_l\} ~ +jz%GQOg[n6WXzͦP:N^ˎ+/Rꑭۊة4HJ:EyΡ=v%Q@/@w57QY(+7Q5pf=k\QA݁_r}b6U0 v般bК) rO{MtKRO9n֊sV=qZ67ngTڪ_|@@<9=VAULZpP;pAs+{A+(/jBi6! 
N6+1$fb CP):NLcB'@Erd]Ț01K"H"9hg^䐘dYe3*SBFӗ 'M"1+-G,I?AHZx02BR#1=b4$C4#]=&k)# jac-!eGP2x\56+ow0hy9@x#G2~:uz͹7]-YbF4|X W9fM,H'g헍& }ykN:)XZ,O/Ir01l VҳbNwuJ`)4P]# q͏3/ǣoK FǣE<0Xj.b釴|;o013dhmsJr7A'lN'Ҽd$J!e7a5Nn0AVY ҕ)(ny&OO\u!?h+ݪ>?l"St y$q3S&v"kyq ckV4x -C.{F21kVy.uu !_&e%r̬=ȅe@$(HVD;q()phJTԉ&R,`=IƼ~ԙ_v&]7AJL (,h3@>Cb'(P*0bw?9P"S'Af& RmbLsl,l#[a5!7#>?u@(Am7|(2*kjgiȵ|;&P"mVM֚5kq1sqUE{ܩ&QhH>OHWpPŨfQzs믘-b%oWMsFuBk΃ߒ7&= +7#Ł]ڎ'E_A%0+rV^٘>Ҫ94r5E>23њdP']JA" w)t"fJV=+s/AeDXe_LĭE:)XbN?,d 0zPEf扸+L:ǙpZQ/If5d _}!;AgiI[yf'm4kR+5;ւLaJ2X Ak2#mqA'!3\*"hgI!.)V;?r8jY|zo t_ǿ~w{1fbLڻ+ϏFU̖:~D_?LGZ~7GdR(l[03 b* $uۣhKWjs4:sVJʢհcŠ<)PrAn..hֺ p$[^NMQL}4y_sAukRZ(VA<({1k[l 쎀dJ= \WC*6h/(| r&ӔX$hLNqJ)Am}zڕ3?O֣ -jXaW(pV1gWAFx6"`b5ܼ1_@:Δ-`}.wwPPflS+| Zg}{+'E"OD,psdYN2+NԏPl˱Œ )LN2+$mwט`hȘ:*xhd1Kp**FLrZ+Mb) & lp:.dM[0.x %+l\_<34-57I)Ik9 rY’[sSj1Fu۪v`!!3Ү]ӔR 顼HJ_)&JqYz-I]@ƏaH) ,%ȓqu0+?[PZþByD_ǘAWw$=bHrD*Ef7SLf^o ~e߇ )A $jUkrP[0rLA# e܂AvFvݚh j#(O8~7G7;~G:Cl @4)L3̧eq A.s夝=Ɯt;Uz9iA6t+Ցu"ڔR3Sǣ D%"ǩ;HlY aO~7M=Δoc(+;IOR> eܤja8T4Ί}ry;ԞԠ-fC' "TAeS0S8I]!UhaQ(K-d3)"냗*C:C6+rw U7$*lC/"YT;$4E)'0wEIɢ@p9YERhS,&B94[ݱh.8Wm+=i׭ |+pTxmVh.aW^<>T Tʾ/_fs1~%n2RSdz[3ӽP?  7׉fzu3}gJjL})LGeӐFGNJ1ŏf0#>byYU(1(@'Wsq^L%eӒߝSbF+%k5ѥˋF\]]{rUfO޲xSkϭ,/qˍGӫw1?ؠ5zϣQq4 W8{* o7"3S\_w!b{%XKXs""&! +kRf'.D"kW~BQd/*jEϳU&f1(r LO[$>#dfg+/Y1n1VՆ|IA5YwfBv;7`@5g4h+p#8 H|Tݮ }Lj?}ڳNh}8O%.SH٤\r.{طԥ[xYpꨙ],Yo:TƖWYunVYOVMͦ;l.> նGU1soP#4Ov2u}r7bmjO+j?xᘆ{{&{g_9k1Kv3fOn|DonI^ADJ! ͋;5# p+'EO4i>iKs{V{/$$á()Șbbbuu[ra Zū)AJՇ%hD/]IW=MW^1N(g1c@@ȥ&L>Kp 䐽t."QwkYVHu~dev/S3).jf.yr#t`-c Hkme--cFHkM~oWm8f oQw'+3ҮY3N; q$*!A/ S!LɄĢ_ (@uJ| {vïzq :a?_}hՋixHe$޷2Fl,q@MvI6}lа"X )KmUۏVlT*gC8]b9g), 4]lj2[tW[.(L)xNfnn5ʣr&z]ْ5M4s-{)ȸ,iY+]NfV>K̘+$A .?ocM{c?GǣգE)zKWTxI{T[S֖x'k\,\{Yz\ N:L%*f,u2!NWS3Ǭ L&y`޺*G,l JtBB6.+|+8/}lSWDat:"@і3̱.1jd6ǐJP{2( ֲٯ /#χպuA?)I7[q8!K.)jP cq_7GCi9X)&j 6<3rU=5]R֑_i>7HfZ#am$ȗz@>vrF$u$_ĎUR9htICR$"&@3Ŏ8qS_>;FW S+~oN"oR,r  G<'|"Ffc!|%^3RDVJ fL% V`%W)dI(^TCH~KAPORy*^3SyfqUW0m%X_3zK!Hy*eA*VNI \HYZ)^=4R@ɭn` ]Ȇ _<4QU@me[aT ϫ̥D(6 C^RBAlܝ`J#Gy JiZW<Grڔy5oiC7FsSG*|EMkr85;Lk4A2PHR2F1u_.rMFXZ']1? E5z+1 R Җ#13>#1ѯ:a"~~wS*<~۞ MC< IOkU)Ft .Fk%R-J&dBxb Zʺ)>:V( #ٗkOzÀo+_7{r~[ VҪz2 Za!b+춄 b^uf321rߢj\|Kr1{VOX4;moL 7b֋0F + cTA(8Ȣ4 .e= jJWYA6Y|~3UXשWtz:cQJQ9+&*`=Kgc!IP "t\GƾC/I(`(!UA[9P7€3EmON:/h&db:;8ar0+ v{? Z}=Fd;W9ߣudSr_p ̨{;t}FKj&X n}5td |uI7&Hvd-0S"FA\^Lw7X=\ J}iL9tzfێPxq׮r=,C idb3:LDVh\h /162qtF NĘm5,1faI72g}Kkׂ'yPO"43RoYN(E߼7}EmW7:|]Ipܷh3p$PR "޿4S,U\~@ޞvjMp\X?Ho#VSs9{o& 03ٕzdmuG#sYo)'G< ۼx&mWw{~a(^c`4GRoC-o% cbXD`4HK47o!k 9ߔ -Qx9R aoӕm4g<>8%k\:ިAJ-klI}LS 7Tw@}eH`~d~]h`fcSP9%/TqN:l_F=ZA.Ba7CC;FM hٴZaO~L:<.&GaƱB,Q}S D ~'΂FF%u<)˪:h vUL }A3Z  v_OODc3Dΰ+1E*M[;S={3wȎ&9F'-rIƴsp<;X$ws5/+x% }p1zedp,%SΜNbS=i@s>pcQ#,T\ #`ppstFPz s>w,jԼe`az%gH3}Bil#BCGڨY??mW}_~xu[>Kd|}YO~(%~B 4bJڙ$U*jXTW@pM JJp|H¿KF'O :*/ҡYUU o%2ԎӇqP;NZ Tڐ#SP*?hʀB-'$Dy_n= e}w׾̯ȍh^MG?0.?eT|x"-Ba.Lft-kRRg+~Zl/,ċ姵\]Rb5hVAQaYŲ+mf'?70A$NXpR`c?]CDo3s}Njwԅ6`Vo/g"ђjA[⥧sԡ{hU)%E@ )eR9e yfeJN! 
kT>(8>ӔgF+  LuGHS'T7~LдSok0eXBN88xVjE)tq|"+hva[e󷾼L(E:|nQd;w]VH ؿeɻ{?NEQټg-ڶCx-vdYDcJ9kMػ3S&9?][<ӆMO7|1<ե4]`̧} Te]V>[1~s+D΋ElrNFnRvO|OͧA Nx)PsG/ D҂$2Ci" r dƁhPB+('_pJnZmZ"|$n_"c3_޳ܨ$^iBr5staF344S @GY1?ߎ--: Kx+I37-x6`X~b+%_FTN;)7z^J'M zz7uEIu!*oM5 A%=uT/N^<fQ,|>Q{us HnoL 7!xDOnmBj.rn!+J_u{v'uV8jJ KX#2@%x?$L˲xJv{DKTZ`H5dW+R@~yߠqHV NuaQYzKI3Xٓh 6xe?}Wp<ь՚wwBHsF@pYP2A}_PԢeuPW$4i6Р;{J\|(&F-ϧ] ^v'0-]Vcp)E@KGOԄ4 tiF5Lpprk_$(PP_5%xCAY*]ǔTZ^iFފӚw3;y ŧN{ rXJc!Է5u[3w1YOc Y_'Yj*EFs-+-@\0BKQe0q $e$te~9?͌[yjC=|M9Ү+/ |@@Tt[ ^Puh@Vv]4Үkw?nH_̗C',EVXľ O|Ɠxf|:W#nZy8izE; he>rq:,ֱn3vֺONtCvh7hr :E+R jʉ*Qh"῱~2<J&!4}WCl" (٠"Y)h ƒə>`ЉnY!Z)mlu?hUwaxYgnv9@?ØTNJ>-}sOD aԢwnލ; QIQ }v/T j O㣿| )6Zj[Nc, p~v}wWW5dQp~wwù )=^BK[K6v.~AģfonP ~)u)w>B#~2)3Ok҇ZN7h>~3#fx)Mp 0^hvjo  "`2XeƖ%=B1~4oiYT//n>4[j7X}rj]dzqY8IJ q.,i=>FA(a[4lߤ!1CB9ű?K`v1ϏLѮa\;z4enhhl͏kZןQK(Z{TWk*Lx eΘQ. "N ; !j)ABTU}Som˘R N ƖyžMR&Khe`OdLo翛i .>ΐ<]<¸:Rٔۋ"svWT g&`~ʉÈdJ 㢘4~Z6.cn\c*O::&&YG&='Q]O6㍠5]^z =^\Nl򫎒SmSߝ>7Ž2QD#ttf<<Ď"< }̰Б\;}7q]v,#.d1d!"5;ǣ kwEur4P4YFk#B<\[+Ef)bhw)&|Pf|4,F9OjT7^RvRNjIm8v'(׸"ipXuɝCH!XE2z*|%b}X."E׮C S$5.ܗzǯ+hD%lAȠ qѳ{BY)bT ,w)C ׎#?o.9. uD֩z7\W3%DDPr=;V)LI]LѰ}cĨ}J YIzAѲ;!8n2f6l* "ld UbAlըN[ eٿ''dʘ2Ng8!s~6Ygsss^ۻ~dM~P3q zrP(:l,ֆ+>AeYJ%t )|1` ٵ3E/HyS ufryoٌWa i8Qc,5B7ɠ،ZۡUY`7T9Vֆ"IptBoGBJ l3w,@ Rk쩋n#(l_CI6=-)*ᴶV9d$s'ʀfN3u$sOhv*8Ԉхϲ)qhP -Dk9&'leT x̩KT]xFlI8r8P, pR쇭aYzTH(fR%2=?#ϩݤTL Jpdoԭ #B&nݕ*,(R+AGc"6u+*jg)22dSad;C''Z1nШ~~ēmt̊{n|bHqt2Q#qԦ(q‘SQoFcFؔEUFQK!^ (+sHW'Sm$Zy ieN4Ns$UJP*H ,@ j<e.q ‚qTP^Y9X(6٬)Z ĒW%1Z 8ӢI,d4cIWPj+jn\j",3q%$;H6`sP_JЂL:bhK aXm g@@yф 9"\KIr'A"19* ,C$Рuodٱ,^)gZ=,z$=tiK8fb3 # ՝L5?$Nf݀"d3)s5]w*$"r:-`tݤH,mA8gpo{M1QB93.rMt91^WYfةALKS%u˻ZL9O`>2}D gMHz{-qȮY[^ऒ+pßvCҖHO=J]RԀcv)MξKSO08!ehc-g>B2&:+UvZng~1Hxo7!ZjI9>|HuWlUM31 6o8g;ZMmaG^^3dv\TgcdntMxVocdgW6Z`w+Ĕp>h~FLO1_ǣ20;Yy^Me3ѣ-u,FSR=(SIZsުtcDqX >U$8#HUҿ<͒ 3F7v/+h/`YրMZаMDjvӍ7]8Y‰._n\BoL֍򵿻 6I!F>p_aKm5WˤtB|;^4F{:^" sWN7~CuX/C|m\^s=wS7\0!ywwg)Z{;;,7ӳZ&#րqgv~D!@0F>2q鱽3pa|qZ-2ޙxAܦT׹}Qʷ>~8:Ns77{?a04a!i0JDŽ_u[7'nVI8V84c<{`3*;fR]ef{f;_b#*~)k}`9A5-BgWԾG j]5txTp AxGE۝iҾW ࢿMQ9o*kkVIo/;:R SMRڕz EcՖ>ftv=a*"Y feExuEGw/ 8ZI`̨G+7 TZݦgq 7>?Y5|yX%MGEΊU@vS%ij tkFƩyv`0fR>*Kt]늟ߟG RYM#;C!OZӠ&}m).D25A!&;~5@U+ka4].."=W"=`;T1_W=oHwygd:x4:2Q8r̤CCKGo$W 51 zMgSc0NWVej)xlz?XV XNACݛكK"kNMn8`,+K?c77Rt_;=OIi%n8VSTsn}@MR/>s za8KK"']`n.{m)=KQJcXT\$!߸/S $+pP̮!Ku,ת-d -"r2/^~-r[ЗS>݁MaAރr` o`A"L_?*Ɠ4Ue@S0>.YT[,n޾ oEF~dƃ{?QίIp翌i*Z1Ч 3V?2VӤ7߽y`;n^E4I$N0 -eĚ"DlTPibwOa8^,20j\=QKaU |MAw&<Ǖg |=ehUO+ UziFF#j0$Xv_ΫT I/`rO\nݗ d U# Γi5 xE$!G:OA \߽lF v8^S@.x66\tm™ g%v޸l,k7.BGs!nr$ROcDAnoO([B6sQ QwL@XGD1ssyg7Rl%T^K¿9ʋE0q2F }t^I"x)(4ZZº(U.=7:??yǵGwTajHTP6Q^(;Yy j\6D Uv9` jPf_<,)@cJ 'De)WOݹݦHHm*M^tWX|.h6dRI è(ـ|.sC8+wCs^3Y4 S|RP_?,J6ZG}ZZ&Į`J,xlvIFQO54<,x,WW}+˶kw]_OqIFk|I,sTlCF~3O S(;BJΥԓt:'Im$I"ojFBDJ8H4rTXnRPf k,Q@^L1o_zx_ɡ =TcI`~n$% *zzf^k^˧{'pw7Z)UQ)RMuJb._wh2w7txW5Q~3xہf[@j)$}]Om g9L 5ۻUP$(%!#0 ΁0Z@1zLXD閇zO\ (]de6%ϷIW5w>6Zqi*|xjj-#\?=}FYf~mo(.$B |2FR3&*ܿl(4S@sl拧zZ7C@L9P+zL4q, !J`ϼtJP+SWjiRT2DL:^k3V_W۴Ƈ0 V(@`X0k45ă$`&q+`j=^x/0#ymy˝ E*%5JKFFA_^LQp'=8GaH%e02gQyM0LJ!1Ab/c׵3㗍sW<̦.w`;BhM\ypJ& 0Á!R #<Ց*}]SԲV hp7bԆOX8K?QZ'm.|nܸO?~\K-QQU`̘'h<c,XFY=nr~Oŝ߃4𷠥"_e~6;pÈ4L ̦œ w>࠙xZi^ܨu Y+iZ9w3/N-TD=6]38hSyG|9twQ79wKNҚ )W3woO7=T3{#˅3swv=Z8PÔQ%e?"sNIAKKK'f iʳ'I{ ؓ2iۭdؗaH?IQmf\4{SL!VHaaL$e!m(omRf03ҿ3J H<Є}ޤqOs ]Y6% w\;x 5K2OƲg}up&%8\iQFx&o$z\5ͻ{x 8.շT k$ dMΈ¨ꖏ $e isRFdd Aay7թƂwkONu?xbχvn0hʪ#1_'¤P3Z&lqnߖAOϮ9zV& o%oam~pc7^>lsl˿D;pU\ vh;Bt-jQqas`X"%%kF*Re1)u\+FV%T,dŖ[ [@ HEN)t[ a;ІZXjY80Og]sWA+p/fl1yدnqsg2[+tlT@6P ?Rl;`2(:({;0WYA$xi9H*jO1]ʔ b?D8%DQ[BZd^ 4do8QmVc>c7WY*S7`g9x*tCƪ7KJp/[:Q2kc1┩(lXu4 L-c}o$"_b6,7[Q_}_>_ N=}'6H*De%}e0tgiG Еn5 
׾{A"﷼{XaIFϭcy㮵/ *nڸc^rX )o;*7sb42( 0 A. -LS8M?\Mw @g7}?)UsT9agP 򊢰iZ &` $rDx B"6wup[_pE_7ߞ̤/A+!ѺiEZ~OH@U xB\-V X.nE ﹮m.QQ~y͗sͳϑDI:+>2+ӂ:^`ۦ ?nIM1*+A9gSL<& ,WT"0g -6ܑj=F8A-?)8uX c1M磢]vIFOp,d`њvZENm*fRRi9VxIwPyg!VX S q)42K-GCB%DC8pЯ@ ]% MF/pRr 1!KOy@Ez,dUd9aA_-;0 F1k(P2GsUC(q1c('ԐyA 1@kMU`& +)iRT whDga C<Ȭ`BBrBD6!U$A&m`&5MT83&zbBZi BGYl`??߻{[w:|T-bAD.(3ZfvX2:SP~E5$)*g6'?~@FUxq@`Qk sy>,\UaQֱ͢`fԉ/k6 =I6 QkxXb~[e$D[62r+3Yb{QKiX3H{R?Ƿ1n"fhfɘ8#L24y#%^z=1bv+{Nh.][Lqz1wczY" <㕫)'å(`J)DT--;zQ!n:IA}I(6yܞ.@Dv5G:pTKsS︸Br@pJIlU⇋_.\EǵVhƐe)!X qyZ&7Wrmr-|Mߟ2 NA][oG+^ꮾUC`{c#DO_dnt[Y?CR I$:c|]U}*ţ˘o(ɁZK)GU@h韘y3_@Ɍw%/-rG ƨp! 1`dkn4{ӧҮŒ8,^f>3_ɕxkGNµ~~m<^\O>h|Z޾*ەzk#fc&Փ?YteOy&xcLW] k'lhA$э_Ml;Itdz1~;%[3ɵ z_^]z{w>=]>z7|7 )=B[Rъcڅ5٪O\FoSjnZs,-y^r04OE9Z/v[K9}ߛX,Ju>wyJm6hA|{oebK"V߽f{5ִ C(+]ekqm$ݨxLIpU"F ǐeȽ "O@X0zHFq>\\IZM0$,eպ(zTMGk;D&bOoxKJKkO޹Vȗ澛}p4#+5݋FWdk_6"Ȭ?rc@Z.$&R5,+.dBqcAOg4AJ#!mȫp.3EAQ%9Br©Id)-γ.@IʨccGj_vllsϽc@<6 &/r ;o1a r9\ r vR,8A&ΆLc[GnV|8ǶG?bZǶǶh2v-vkP?RJo$yϬlt 1$A$dR ڑ_x)*"=M  Ղo)xM%m+ԏ7?偉5`'$3HI)^vXWдs(GeF+Z@$1?$XI ꕙ7T# j^K=_ [l5='}57nBoe]J_ avmǘQz_MQ4t锳t[ʆ!OY>0{8 3E.ŵf7~_S[ ⤢Y1!C{!>zݺ]nmeP:mhb."Znkvg]8׬U[[ԂN1Y\EGκeuB9D) ֪\gpbցbu"ϥ8w4~;0\8=%/X<Cf_(A{ysq&/-,eYvJ6' lH;(/r0(L{BBB11,")nZ;ocOfV4srլԅH`Wk4dm6btc ڝf؅I_"gKg޻;#2uf땅KYܜ~1>8#3a>#3ah A@hJK y%hB-" V}]A5sS쿴:*]s-@/zEdr%yI\ GUt̰sf,;/;l(s)g΢maP4C+~.ɴV2c E!41*#w NEuRVZȔB`Eˆ@cgG-RˤB:F0b#jORy牌[DmX(W0/Y2vdʧ|&@Еz;5,:h\3䢦DE6RV=!pYc۔E{P+eM Z.מ2;4H|hW{漗a{漗a5SdwO &>$Shn8zoEZG)3vmj~"E䛁3`_\`k 5`O>"Wڸ9-'2I PH o RRmj[u2bZi5bٶe-m e AgŠKR.!<3GIouW2>Dzmn&np]ziɭ qB@6kOФ\%%lP8XC⡑DQm)54kf4sr4^HAZ-/rB*|LD N$u2ʒ٨1i&j հL4:mANg}"a,yGNx zǔ tEPRqno.OhFcty%tREԖEfЀL$HđĀdg1dt1H| %GN`vqFȠdL zHɗͥMlUh 47 ĠR lZ6L4AeRMP2L2YneEf6F\믴$ =`Um]&:ĀَKR1 LTL—B`jrk&nLQ݀"64BީA4<ϤEƞԛ|"I+mbfGRރGfi^NBܘyw BR@0sh$Rn [nx@ X*kD̠zKJ⑍yA8}q$sB @|p"Q-~{1˫-EYֻ/!rv[zY/5vvЏw;b xmhE)XS)8 ;ۨrn3PfU,~53 YJ/cn3Hƪ.T^2-YK g 9cfK,&`|g$NN?=)$p'{?od˗(~JNzZ*9-Aqyk_铥 MWj/}ľeVC@ oY-uΨcMwiD?Ɋ3. ۓZji4I`lї Ԋ^+<9٠rIz^~nK"L&Z}p&e rK&EWm[بU^[jmwZW:xQ_[^H賈$S |16jkhΘ{GLwӭgr~SHnhw3󓷣IܫI$>;ΨV3 C誚\q*BaQѠ1h~ 'Ȭw+G+Uet$k):o&1'ۿ;b0[aê0< 0tXX$(!04RN R(M ~*v|ؚ{jge6hպ^NLFUK YW8)j|T"+i:Yu6 T,\Pњ7ZyF.d AkgcW,(}9&5$T)(|$5W>)&< ߄A\i:'|%FJq,ʔW i>zHd+ŴKIy41fRsktse9 3H?Ifi̫V~9T1ҷŕcLYI\ޝ[% c4[M01!kS tIL7lG[NOO^zH :!|-_H{qHEz͜a,>)bq >Qmh\t& 80IIhi&p @JXfssUT%%(%gR"\Z:C!H´!oI>XE1&^YRN)HY~JIɶhCKZ`D6SܬYQH㏎Wez{w A;o+w?R!~z-q&?d3Jkۊ5ݜSEBŖy&H Z^n6$Rovel7ޖAOlfez7. H "H3םl7n_4VT|- btKfcwݍ[@6p $S`U V0hJe,O bL#}ٱi\Q]@_CFuQG/^Fh>a"1 !!:{3Ă@' ˰)lCQmɪ}+մ U ZSȔUdфK 0p ^X'$+.HDy-s 0lF)~N.*4͝p v("'`-!$FJƼa%4hF(2KLY<6VӬTU"lToVJi˅m6+lO# y ~NNABr`7HEH!hv ;< ]v..2[dhr1mJ9 DwQj @y880xT K1`\٫bfy aXʆlֲJ-&LRf|d9-\M W6cPeV)\$()svhLNa[=4{ShuBP g#?|6Chc3ޛnwIͽU]\B^ʹ2 iT&=|w>܍!:J,t.NKL$m"@`Tpգʰ\=U6 [P%B}Rx2SGqib;SMK1!wF&h㕿iYnFr?8klJ,OF}52Ç3gVL zŖH&M΋IF"n"knEP6+Eԇ׆F`\Oz k2"sdݖJvZI2_%6=zW4\| dEinomrӽ>qiJg k">[gT)8-zG ƐSB@QQ fؚ\:lTZ[!EIj4kQ"ȥȲ|)(Iw#ӎF` ╙hz@+eRcpH,s5 b-iW}a[W[^dt_1_NS|r%7|9?Amw}KS+%V#$"g~Os "5 z_v%LMte8 7rO)WFk$R]@f";+*6OG A!J!$BqZg_鏱v5M;( 8q^oTA5x;i/I.$f$YAPѠe%z` s`f/{0  S_P%86C hx-Arp g*)69}DccfwuL/֨ɇ^cz: Fzsyۇ}a(65ƔׇN V-D`YwC񺱣 q@^Av2ׄ5A֥;?UAW tR,+n7n~܊bUYÓ"rz [P TMbz(#4³J3{%IP0'Dp^kDB%MBB p)kj%8xG_ȪJ ܧ4MA, U{¶#`R sV5>~f;/+u\NTʫtz(,wyR"Nuꗢav)V6ۏ5]N8OZ ?! o0eCC cKڸ7 5z@W5XLq%EM{';*Í_3ë:8[g `(or4}b%ӾQ%=_b{|2 e^E-f`㈶Xrɕa$DŰQ^%Hΰ !I ],Pel+9@=w8ViI.8+Q3X@FZJ%\'Jd lNk ޟHĀ y9nbJC-S=d,_j\%-, WɤRLSzJ [ܓlTAϊX"0aFt4E34TL7"ue˙Ex yiTQsQ+&(o0{x=B-i= ԵC@cYW$N7VATn_ uūw;VFOUn(yATn9`M88" dr⸉VwR Pp~n+ҏ罐.eɗ'c5P^q=׿MSG]k; ~8kɍYrV]Eޥ/Ϟ6<0mf?<`bd={}ӛӓ˟O.޾9<'+C8~?A'QA&P'_H cH(`PY,DyD"˵&@ÿJ_^xsWmވ&iʅNkxx51-ikC` M酻Nf%S!.9%}xhԸnp4Ƽ10Ї/cө^8xwO_0U:xdJz[1ACo{. 
48_?snMEz8p%)S4lÁwPZjUJ %uуLJFOmșBG0X}lX\>n>VV C确Ik9˪q nŅn,ng{s7u0W㷍U-bv@][sF+,=%[5Vmjql|T d)!)9֞~ EH `hŖ(h|_OOL_>c#yB's?#U-LMJ~ǵ6eV 'rÞ1_{O8q*7U-iȅh+jjt;tN^Z7!֭-&u['ܙukRdhlhȅh6b7/& ߭-&u[KVktкА W6:E~ºAR(dߣuk}F֍~wklhlhȅhcbLlIӍC$c[jibiI.I,ۤvӁ6쟵O2]Uj6ٸT Ƙ$>c8\&wlORBRx_$=UjI퐓@"8))7ՆIσ xtٵ"0mtH%)t,))a㙌7]"y g_BXfȞ_¡v 9bh_R:Uv 8zEz)/p>,QF*/)l|I)AC#0/y-y,'Pkt)нx]˸kX9gԥF=@ ^ʂ >XzpKf (S mT2HeTPc@~ ;>:aD7($,F؁6&!d <2B(= p;Yz,a1/3ٷ:5ӏW׏z:o\o9׋nQX1dGc5§~V1T4 J%# '!$a~)H&©q0qX"0I$1T) cRB!!9@LLjh'u0MQI R4@D F $"qC$)S qw$I!e,^4G!,H\tjuf(RfCiB[HʢX!@&)XRDHP}0? eˆAtVlk~>O aoW9)7sN]݇'RA#2ehE9M6=`#zVemC[W-\1?Muppf/C)"ci+Q*B@oWEE?u<- T1wJ5f_inh%Z뜷6k%-klNU5~[#w` 0 =RZhϢh3nZeﲶf6ya_do_a Ń :^[!` ~KsEi{v: 8>t\h. d5_fI [-$`I!˰,q+hZ=v4K翢]mj߮rw1sƫ$V_H[Rjm^KWL{iHoSDqu% p$ 9!BEe!Yr~,*l[h4m;ğCTB1NTHG^}DҰDc,d{jjjB`Q ڛ Ъ Zw8^ Klu~I*(õa5*;Z8){ 92^pq/S>)"3xפnj珫$UTfv.:SÈ쇚Kjf5tIz-sٓ:2O7=*(x\|vVY#O8{(p''7IKӘU3F[`7edM a%oΊ8uxp%: p9 #ӈyA|1QE|3"3Ν&I<=2=Έ GFL:'"*fy)Y{ڟ8 1'r9n]@I(yFtqf]ԣKVB $V ֨ u9# O\.b@wP嬺ylɰ`3*NT3p q޷$"= 3/Gis$Y/$~G9+s 8rΥYr+Qts?6llF=Nb.\ h LۂNv`-5ڜ>im9[2.^3z=/ 3 ALJ·E:3b-SClsL/7oծPL1&czJC (cWZJvp%֩&w8#Sr %Մ ,ơͥFQЭˮEgUKGZ`2KU^D22.56im<\ksA_ V8 ~9t~i񥆻f78Q=GFj:J#Fk,L޻J.hír053{d` <_#|KGx!j9 gw牪BZ/_Ych`(wOF}>9/H_zu3WZJ&"q\ǟu ~Z10B4F$ L@R 6HIF:D6 UVCB-Ou\@G͌uOteyk&xp=3& x{sAvLn:0{wH0FHKO{@lM44pw6䮻h@E|*15ÈaTg](c{=p9c=Vf0ڛZ[bkFX$uOhnN^9skGa ˆQ"hqIdi½#0ܝ]xUo2mBhhY,g.Wiq"Χr1HщgbǡS T=v<aZwsW3^ 0 F90)HE1PPtx YYk9Wo=]ړ v:x!$3 Sts1?óY5 $׷̈́{57ȅHW<>mcAĵ/2_~VF0lp̳B-*ƛk:(>N:[pvY,SplwR) A`;ލJZN v!(%WWqra#ˑc?cvW.SbO\ZE;BB"R#txgoR)Ξ}Q ;ݳ?`(!,[~|P(gk:8T򞦓&AM'2@Sެ^Ǖ9AdNgXv}+vl9Ffa~rXdgw; xϿ?6?"&ֻhmq!2,WfKVG)T/C79h^g{}jlԈ1 ?7W̆WP]7,lMԽJP`%bm"@)#d#Iq( F#0"D,iJ/\گ'aP%1R8$2dhX%T+Q ChXH$"azJڰ,<  &없CiŶ5֝k_>6MB)׮֪lr@vi!" г'!BI7o:P<) Dxg%"vaޯiZeI[3Fmqgq)^*^I S7~jr7Wu߼]hehoa<߭ EGB %>.RBe*%QbTBHÊ ?ZkLI 4ok֯ۤG)!/lo]i)~|H:3rљ8zZVZ4| {#CGr~?G2-x@DtbkhbΝ]? xk4avR`ss!4x.G#փgXdUkp"^<Zd ߧp'{`/WgmYoʅ]٢!Sc܇`\p}02G!Y5`/ ߿,k0b3ΉIk2;){<<xz_{#L\x.~#̼,M)mƇӓBâXTE~۪$kCwSU/(8@-6EWt]o-uw!0NR'#*!0Nad2Q ʡLa,(vKi6,j>hKK ZLPp=Ob50Bÿ m|:F4Sr"_:FЧo7e.Ú CFVn!ы{ٻm%W4zK?f֞md<D:%U )d$N=X$oōg.ChE)jdG5 hA מ[֌ l п9B7"* pjŧ.0cLQgQc&Wߑ@j$ fihbMH͈]ATV,IE~&XeO.Wd5sy a |mv`z)Χ ܃_GmCM[Q-Yz^^Ks~au7wQwB;1h)Fmh{3j;@\F X@ѳ \l"q",׎ Sd!2[:{kIc}3Q יUl#LZkA(uvmkAټK[5e:ҳVv(ɞfI8j#ow <}vNvR˘pyRfx]dhӳ';o>cde̼vp돯^7_Y|p|ΕyH[cEg4}vI#1 @9~5Xqzp{"S3nYmJ#%){ݫ, X~L+8;K9Ya`eTG8ڃФ~B@X5bō2/fiZ{qLJk4`pew4xم(킆,(3n:- )矬JD?d.-:V(*1 ~9P[,IGߝE}/' rVIIZ¯} է>V Re[Z$\t@|SwZ?:/BNپ.ӻt<4y /{of?`+c'GdL~+r?Nr&Ag/$cRLAA/t EXNs:#>?;Ww9cb 'OLo~?,*iq u^*RY,Z[o#'a xJad0>doEb"s:l+cVFJn`q]cSϱt7k>LߘgW >1f|[N-?]hNқ7ooBb6tXra1qjvVaaeV0k+Ɯ;/)֌cLwZͿ۰VqO`8+Rɛa6 ޛ` 3ڄJ8 s(a@qg1o +<I[O-X践 &tQfk̔Q0] | 2+3"z?m"O∾ԃs=s8?Xǵt.qE ɰo`uф dFx5>1{ Y$58xla$GO!V-U?Ԫ]5X ag>oG7k/}4Scǫ|VO?ؔNORIDzi7%YoiJa/Q!#=}vyfah*bE?TĉzAm!t[h_HV\[(#]~`ᚇ6i`!? m/ j F_đ̩u_G(~ayr׶OQ jΣ7)b s9s0`V yoO7qY]w?*qxp5j{"Vwnb駳>'zQt_:+]ڢA/տΉMO/3C];i-y^$l4ηhEݛ܍C{沁IAyT/>*JyR!L l0$4<Vj"a`^u'\rB='qЈРq␂?^$$.8ELB g/g!uBF&T)8,i9,K\j |㬉 eV7un-ެ'JF̝Z*5Hȿ\Dgd*plGd:,0n2BaPyO1vbc0cgTRMҏAAYhTgَh/%#l}S:?m#7@SQ/R[E VIM,!AX\ϹTC0w HDxN9s3#HK5΅fACG>nMWP82!!X ; C uXg 4bb'+:LS&MUhN9vQT,,j#ąmYtʉPIk8Lb<'F*0ИȋNYxVp)V73xU؅J{c)hpHRNJm9ƼF;aԂ%XCiLVL6坋! L ! h"}H栗h*[ t?Z `Ugƚ`eQ.gfZ=Ξg8M@ӭ;IW+Ki`Dڳ ^'t8<:\nԖ;4>XHxn"hϝ\O Eڻ6܉1RJ_NqyO:$@ǥqIc H9 1A'=#);E8DJgE sa,ݽ0ƀ 6@3qȁAO5A`|PC,]|q1ڌ)R~S4/wRBijٿƒm "<гkFz 9m }̠^ZQM(X꣆UQ&Bxc nTW?jַ|BܗfVL7ϼN_ˑ?$gbWtIseY{qIv4T;mTQw+gVLߊu_V??oM2YH `hkbAXvZ+X·Vv}9oU! 
FFB(YLŞ=DTR=T"<g7) %G+:r/Ĥ k^K~8ͫ㰖b8o#,1Vre垣 9Nm%Vr}WEFKIVf?:[#S!;8.j{9: KOlߙ*QnVX8V?.SYy4VJ»VMlWn4wƳs-#cqd'ƻ3_ O ҞMA:/Ե> [36;HZb]g$02EG'0ˎL\"r_VEy񩽙KS*"gkCb \tNN5<2Ie5V_(+߀PUuYSFLҔrceb3@U{|JM35W=W +|W1 S4Xѥ{Y@ck%]:K幝-&KM8ʘPo/=ztpFBg!d!"XiF[û4aU;(ñ#^QXGv{YZVK6iE0(<)%AHGfמ!{&c(+Fv-%?4=7 #Jd8'bF 5F#`k zM,Ϟ)Mfe=$7~6/'XdSVKX`%z%GT҉:-*j|æR3EejFqQ)VZ%SCS,8c44Ƥv i3tEד;up̸ԱM:EqXdR-l#'`uxhtKlG#cxts\9V#ݻ0?t ;].hӤ;LmuG\0!\!g!`4Ŏ9):G/3٭;^\2"Ob^8逋gqo3W a 4"#T!s.Ƣ)4 b{g\١g62f͘/Wjn\[vqǑTYeby?n*F@tO%dY,RW$!Tu9öVݔ8MnpD2AJ8EkaVSerCj{s3[޳{b٫dC=]⇢cK7JUJFpDFsAžBK;J0Cb>zN$Q~GYJ6s+jR꺉9΃b%;N!ϑ&cn{&#3,=+p*b1N5,C)}|W;t)d^q5b㩵N] TXS tQ]/jjLzbiFQA#RRc6Ga/&#jc'Uq&z&MO?d*DO L2d.a݂ Neh Jkq(b A""5Sc %>Jߢ¢-`g'm'z;Tv0`a60[sNVW2i`gˇxv-ʸLQe2.SqY2QcxӘxTqbw6 $<˂T3Clh;Q+h;:Nhyu X"L1B3u]~V9wrڇXALզ?2$g"#A0-1; zRɓFiʸN ,CRȑSP4RЯS UrODhmГ"QVop|ײhFx[؊ uh@hBVYeSP7}<󅳽=_S\&-TSew2{ k$r "Xf[DƚA9IIk);z(wp)aJ' Q#(('2y0Q 2ٶM}ب[ڕ:#F@ %%k3fZ/X.9OB`}pv3Dݙ4wP͂V5 ;5 IqTK3A̐ LM1Dhrs< %Jqβ p ũDL8c &o3㶐8 SKNS't[+fpSjJȜԔ12Ǵ@ MdH f:xKGNJD7oҢɷH\z'BlLMhy8FR*$v6aq[ЕE‡x|hlaŗx%7p(DG)X(#Iq>h!454+QDbRk6!{`]^0<#hi)Fj&ɎKszk֌ph B4ee~Xכ}A3Ta6IA>"YB|CFMaFb6l v+L.Ne[ TF+1DtQj'tDlH=,IOkH6^wDeZFs˝)|Hc}% Vtf*O"0mĬwӲ^3D]D22ƕ'X NrrS(KŚ%L}fP%串SX2b̦%9LK?CA5ry@ K#5*A!ޖ)9d n=Tv픩r_K[ncJҁ?\VP3S}u y7it55V ywŐL0=󁵉2ywm"M5VChTȱ7l7K//9F 9C?|pU,_5: ܧDπO0_oK ʟYULJ{?on6Et35\~Ó-8qFmgIwYCK\\#5 bs = h!5R)`|X=~v{3K}_f.`VWa} I::C(cՃ9 op'p*ɲC3Ob\s5mQ;W a`2DMйެ!01+ DF)0QpDNtˏJy1?3@%?xa2XWן,w1jŘ2P#Xp)[;cD~Lw}iƶL8CEry'ĬǛDP;;[?=8/~"G(; gmBqkԫޮi>*N|Ǿc'ejHF4c:'38zlfWoLDME G8$i+ 4JNK3yE"WL:)P0)O~oys luEg7$`be"x>x6YȘ1oo]G"%ZTk} \{m jAX.-q`6q#җݡ*9Nm.8 $ƒȐ٫RʑTŖ!tOK_R!T-szqP)^/[1.De53!|,#& !.ĄA!ȨaϪPF"edCj%!Y7 .o{gBu`ꙠZ A4ŖrKʉbsj) acx"d 9j`=':~|2GVm`A`peӊV//G0EKOögf~ CVl:8xCv\r42~O7'h'șoLQ_?ͦװNdB?Ku=P =M(.z8({i=ي3GgS?ģ_k,9k1:nQÄ} _Eі-Ȇ+H.L轨tLR~ϥZkTJW;߫*wL ZJ j<= 'ƛ%F_]cWw{|'ߵw a6K>YDH}M'Zuv&!>! 3$O|`fW7촸2;- fҨD%z[8M#1Dg\[B#i/Ot֓J>H`vPsǾӹ. ӹv)e+Rg [j"Em+@׷&#\2kҌcG6K >% zY)7Fc0LcZc -!.M}CX"sQzst}JVwu:'z"d?F67~{*d겻p!Cz)1oɭ(du̓y4H܇?.T׃HHY]<[SV 5)$z J=)~çk6>A]JTh:tMmu(* ^q:z(TdDH 1QQ=zX!qo:{>l`Dݫ-❦"Bh,Eqia)G ]X@SNX3ˏ7E^8 kd݉ωq:}H'/(aaɽyta]#q ڸۘ"q&66Nl޻|h2=C3r/sQ8 U1oN獷o.'`9on:et|_fء(E|Gl>d WI <1 iF-I̲,˘sP;Th ( Ilڵ&@i}fnR8?Ŵ^~_MמԺ%]\g3*DTt񓝩1h>Y8C0 ji?ࢾ.է0kZ}وj7KѲўaPYϿE/:D(+gwI&>TkǑb4ҨpâV5t!٭=Ւ DZ]{$.TVvug%Ӕ!nT i:[f4X%*`Qlj)N؅0ŴwkҌCmn7NDi;^RɌ&7t4fWiFRYH^SԵwz ju4su/ɳݦ.3^ PDI[{ӗi4XW_͡Uӆ̍3N_n7{q{Tƍ\Q!q׽Jo^9sb&^J=ʲԊSOǺ JS8I)M3R !ecQs۫ xvMHZ†iFxìȎg%4a(= Ju9e:/9d-H!%jnpnsŸg!f!) 02hg9wJyA\s2F` aX_ѯϡIɑ䙇oP BzI-1:))'OA-tvSQVQJ Ws4 gԄ5i]:Ӄi1714C? 
S+C/_Ѽ+I.MM]OC0y{E Ip|R9r }ߡ!3)^{XI4z Df6nI?z=BXKb*EXxOVؾR1UI(CJ(}5bFkT";WU!!LKk԰ܼb]{dx-爽vy3qLc3օFg<.br.36xpbdw|t.vxݲő ̰R\ ~2abe~%ʄ\\(Z]"15.։Jv[?{4ɮ\qVd'A׈B֞4~KIb&KXP ڍKiIui#TaElB֫OS; ը#Z%:A+Ɖ]v;PM{q@&RH \"-Aol-b[0G}DaٛRUo]2*'-z픀iQv|}6eXg^\ʹ;kw}$E+C+oi5܈rj-MSQ3ugc#;ge0SpRu6|ͧ,Z-Tw:W|]S1!^os-C ޤjK)՚[l ɄcgPw\Ux.nlVý;)Ei`M!v{k"+4?nn\)6  YlVھp3$#3ޙʅ%KQp 9&J'&%TKbM'F'ޛ{gAm0s N{jd\rx5 rXT75JPvnb2lt+lZGx>qWW;SeD-$SR{.6d%I V(G 3.NzBC7xt=" ;!?0BaI~z\T`PsY.(8RDڢD0-l %e*{ù*-ثkF.T"= [œtIz0'e|7tޖ3,FpdQskCf;0nR`zI'I,bBaarV ޛygކͫ1*9ZMaXAP2mǓ*a0H:F\CbU(hBN.,WBi.Y/~Y ig'ň{)mų*}VHiQ~e$rC Fs0.^2-זL0Qr0.hF1)hS?̷s\b' U4NSsіbXK[ puv8JP#HDj,JG„dZg jӸ5pJ׶atrBX\^[dlj,Is-lh'@ cNRp] 0R+ CsR}7wcE017!T RL{rddDGQbTt$cTh6w{+T 9¬crC ֆ N ϭi(0@3))QLzb4Imaf5LAkA=ú#2bIz0%1RCV>LMK\jJ1C:V=v솒hJAo8XC[Z8pmh,CCJ\vRRkpUEq0kUEZpdir fOE^|`n'Âw)~ÅĹu?tVSfЫ-"~5y^^ Qeݬhv_Yj#텀abLcL8hyu&⛅wKp{nzoo[~EnE>MI~F̬qT]NF~| lfa !v-^gwf=ybgh:߇w<\JK xngP\ih)xb%ctʙ#߈cZC|51o8S໇$M K :AG)^QyoWݳ$\oГ|y֖)SI:˥<{g~Q}}mpFɵ^G.Vju4xS.8d)pr~5Gs@'Ybݴu@Pf7?nD&Oȶcڻmٰ%cψE[atXv67-aZOꍇ9qgY˜ꨇToX6KMi7'A}sr_7gz7_r6g#~3v&A} %A~IYjIEyZĖ%.]U?0[yu,Ԕ˯^K]\6_/'|J7ΝܺEK,\P3 j>Em\;vS-8U6ѶJ̣ġʑ.nw`Jm l]YLFݳ(W6^ I<`pupx,/xVoȰh?Q⥅ePpcPpSUI(r i"^+v:CcBJ~Ë^T}1X^??ÏMx|7j|4Y4}4 D+ȼbJ덆+1pRs%r߸{ڻ8x-K!3W‹̽4R8KF8GH 2"1M)ҌN`DA\Į)q ^^eJEB$ &;,9\&uFBUS T^zJ_A It THV~A'!-&:ń4 ~Ds.1)\0$2I(a iE T U5,ˆ[}휷n*Y3HX۵D慭o ҪO̫A{ lA%rһvOHuwF2~gQ~ͿԵ9mS[TiS1͡i}ThXwmgjf4 EbĖ"4dQvBHq&8DZIJi B$(IT1aQJ LX-913.b=jFכTs l-Zni6HE[?qetzy6qVA2.?=οz$vfot-_jMS0.6[*_r7BjVsnm?_cJcTYZrQm䲧X},A+I! XDzsu.pU;Oש*g%Ռjc!۴;cnܻwZU{4sN.Rg ޾pQ4??ۇygC(`j`I8?#펠!^q qz,;VT} իJ.ȢGr#$cD0#j,1kآ8&hl k[70mv`e vxmv2@YGV ɆkACNRFgNZa(]RKQ}LKS/9%ջ*?P?BRl&>~WKCNh4)Wr y" SўUnTx;jP ʈN3hy%TzN-<ŵvCB^Ca4֑:ngTn-tK5inuH+nu@5Lj T?k)+ҋ89#gT;0t{6x$䕋hLC3:\1RyyߧӊQ 3AjNzbEVN u%՚H|DOh3AC39:ℜSPL`\k2V!25 aB:xJ౓G7Y|)1vV+M@|>zSGU8( v0pQccGH\VG Tu˪WZP4bq}oӯi83ah~S~}$!@q*aΔ׏~t~u6xx_y.Ip nR$eR%$Plr4T`cF`B')˞}0/J}:+sS? jê2IPX00[>L1fjo0/sBC/e{,É-')ҁŹJ)\ASܢԬ^S>U{膴+]v;݂6ի=ypfzJkU|4N4պVb¢0ppRjjC-ڔyM;oӗМp7mOG|ĉu'X(\dQ1em׾/ 'uzVK$ $5r'GA9`u_v^\ruv1|Z\Y݁鴢0f#|:;z5yȧ AX'iL?L Uev;ROt%{^\8Hƕ0;,~Lfepxv8Tu ~iekmKxaX/ @p^\D:nYt3xGse`+8|N(.OS>;?p!֩[o:f:~tZ_̈́Ɣ*s=RňiBbk 73TZE"hn2/_kWw8*և}~~Sd2sCPJRFHs0L(]5MS_)i0c&(Tl=8֤OSI#Qx]Ө ˁJJ"4l}lRKHWm[;.L&۵#;(\svR/кqp!3PZ˜(-JsiTR\̢%0NrAqc3do30 fv6_vY5׫"qG|3Z0;!z!)f]dz{Sƛ1"A䠫M,YpSpE2CKemjn^Z$۱ (/F& X3_==`TɒV}%\vy H漵՗28n`(ma*?8٧\"*07}#HBqy2A˧ DRN#Bƀ>(Mi2}V)9 /m[Ϫ ZYL3`kA)>T#l8IkiC4kn`מq]^JǢњZ%_ޝNG7>_x=/h>?%n#a XhavlCxoW"6(&6o<1x8uyyOgAUC"e(T`t(<\ I{PAN6'ձļӸ_ݺ-&%D^+Qs4tc'7 /nT%2RRQ"O1B9?SJ#=S`]]޾u vZGs"3tQp =Fq5Fͯ7ªy];տMx6[ok^%լ+%iuo߅QKShL5c7ݽ{ڕ`,/*Y Z*!*Ҿssm-oNMUIQ((VﯸU2eompp~W> +;\{Iy0皡c!F 4aHXEYHŌjt\(6QQ`f S/-ӎB#<+ՁuD"-XK8HIJq"TZTd(M ${Ք]BjQP>K#Q®7 XLn 0̣;c&aYD̂'i2\-hpʥ{{rtÍ>??܃l,!%^Vv_}>PkmF/=|C s䱟 2Hrf&ߏlv[fD`ƏWd=HV9<ؐ5tb0JCp^z޼xzd^2뤍_a:>'gf:Ǐ"S7\"W{"m%"JdivQ6$NzLw}=’W$Pp"rc v7*-k7'~#;w(E9?Zi' ЄiskFr O{M64ʆirBG*%|g/D6?n~B>yƅ.i?fc c|r$W0V4iem3䮯LU.v9}׺Gi?swh\ ؞Ԏ7RZߔIhLHsaBɼy+OWԉu xޞslLM4J"~!(8! 34=?X3Š%eFOǠ $ZFt4&%-s;D3m 'Af\C,TLkAT6{^v g4ywYfh;^>`ngR( Ƹ[ :=Sta$]ݬ<)|nNu ݨUzd H-ԓ Ȕ u&:T7 K3~jY ;VŽf4AMq3Vj@2V**J:'Ie-㞄L OIE$T7|Ϳp;AH$m+xY m4PAEG("6bÎ9i# Nyo);a(Ms9m/C5 T1 O] "P?|ԇ܍_O܇0v2I&dg|ԣY``c+Fa`S [H\<ĥD ` ܬ*fvBYzϓ1 x!!;YL!JrRLɀyBE0u`EebpA:A&c!K dQcAYw˶We%k/zW{w&n>5I/AR˧nH;>Q ~wKjZï|}@8E׷Q`*&yx7 ~񫳨r5<%E3c3}r?3YM|>Fh/Ӄ˿ِI&T >.{%TI-lɌh6 ;[ƣ ~R=$ e#:q6)! DWE y(TpEX *k0p㕏qABcFuC!ĠosZjzg&_W)+aR`J ->q@zL8ir:$i! =v@?»qP "SY!c|õYNG*sV NOj'tnڵdJ€ 47>J{oʌƵWK*iJib+ KޗD}fwYn꾛/#?f/\HMp)bZ?H18_nd5Yp1L D4ןg= n'QIOJHVې%=WC'~ln\hH"\#Ÿ+Nzl *|)1 na T`cTHq=J{9#llђyŨ,̼'< H:x$qј33B 5!0c y̢*T瘮,Va. . K,. ? 
[0y8pm mTY?nC1)Ӥ 6J*Wh5D!i8A;y^䌰E@ &4-7j5:!0>lμZX QAtù.4jqup` UDɷm#l>i:L|B"Ur[N$Z LDP$5ӄ:` $#=GBt-^bn`>o\rFf\0(c0""l|2xZosF|Ui;- 1(q"=E`,XC#`hY4(+'W2A#D)y]1 j([&,.oȷ!_ JD_{ނb䶖-olysbDHud{xTٍZ6&>w)S*} p n4sny:0i4T=&/ҥY$.O/;PT`bSl__5u{Ꮚ;rv\ߘ2-jvaDHȪC/.ϯJUłQ g kd]eTRH4}*FL[6̼ZS`׿Ozʒw|Ԓ`2 kpY1K|+bcߠ߾&'=YBLBCU8)rbKȈ`%e?OxҘEqZ:%dA.`}V}1# zj6.$NNGFQYz?5. kF{搔$MGhrq~9dۚK9Ʉ I|7Go5WQﶿ@=l0!./ets}De꽴/݃A c\S/TxaZf :1ד7B*!"boG_f| Tv-e6?}p)n&9>RT\fo[C:Xj3/`'7gecGA5@#+nj_}|l 1twXZNh HBAGJA"vlH+αJ`r{#g.[G$m6'KkdkR>bkt:cguAIi0 S3mH5>㼨 ^RV)kqPxyM_0и~TSa@T"h 1˩UEHŰpiPL ڛ %l.2x [v4DҕZFQ= txv8n4N`.9SKsyeS (J":JFmVs齩fBCoα#߬PSu`,e4n-e}1MSX-ȧ<gREGvnPX+}7D|*4CS˜^n7Z^Pu gt}Gr~W)4ݺn%fȟ|vSqi7* jLh^0[<ڭ Oѽ`e!yoϖLavĂ)<bΩ͙*^H9 5)R!I*;*f̈́3/xq[ψ #AQM#lc2!Ӓ4B>gK@wIyh:@1սA|DcėJ}'c#Էvo>yjy'7[zx'F7r[HNŀCq[zy ]RR?}iR)% [ɲ2Xڻ Xq6Z0PV0pio!!xC=/uv>Ryxj2ݤqkrMٙ.1^Fd/(X/Pnd!NcZ"Y1T@y6d&r3Qt2o9%gk6\rp&Ue@(@`C6p85 )M]̌OL)yhK *b&aV?Gίz>&DK7?5xvs7/y?y^\ 9V'E˾amξ hhۋȅZ/:3u?2>qq0Ť"wtNUk@4ϗ@fؐD8RщrƉ-*l L0ijFmyGG>E" ) =k}):ԚEyQC?s ͊cR!0mgϪ_?.jEy?`-a"<ª>].$R" +.w'Fd翉..D(*$b"  l+uYE͙Vқp7NBØ.FGxy @UDl  zo>*Չ^ =򓀐CZGiՅ<gq!%҈J.eiz\Bl=l0`c5Jh,C%sFb^u!mU2*8Hv]=BwbIgA##9Ƃ <9#`>U(cāƈAܑhC <'؀1^y˖AXUT^q1g̃䥯<*XaHd56X5HcPyyiVcQn@&~,w8u6 ^?|%WR]wՕ&)g9gz8_;[Ty,C־6Fy5h-/MJ,6ɺIy˂@vWŕ_dfD) .ȇ.>m?[<ނpV/Ŏϓ:CF6riyIkP`8P5s=NUw6(IIy6xA!VBk>)c-* C )ZAąnT[NTj@n.CFaM 7tNR`qVkgPֲF5Rt)sYA<fGeKuT/Hb)OZWXZU~I"mQkڳ BA{"k iEINB(u|z$T J/PDT$D2I/gPmw:~pC5ER0B`*NHeE ^X'i)̠y'Zj| `H"!Af,Bkj┊[,3*Qv(өRC=rIgh>Xq` H.9FBS!amy]km-oFQm%Y÷`8Tac YuhFJ.Ie'36qUĈҰmZU LC(mTE/Y(d넘1T{ix!Eku /I:&N%k<ʼnHUJgmXt L!ԜgK$~dY{Rغ Dk-M!6v Lu@FjRl79T*v#s4 S9>y;Bb7ukB&x96  0a0FŚ*S02îY!Nl)Rel[R, x䩂P H\[,7b]gO"Wre-HC)o.bx* $I 'Ơ ψ2P]!AlTLU 3rs I"i0WfFɭgM$!UӠ0ˊcJU@xbfS25i5FR [/`:?ܜC 3m=q8,$ۥwS{DѶ#ݎgU]dbH ޿u9 [.y zi;|j<8!U^5Hjۥ3u}}Qjwzidw~߮\wCc:u ϪjH/e8j]h[I]eX֧ukD׈t5C[w$gPF%v&g\5RqYe;r9;:NzV9< $ɇzΖ$*JTY<.p2f[)6hnQrձ<BՕt/#Ud|6eWojMkj驪6Ct4UQieW RH~k;VuT>̑c=*|<(\v~{EW'SWԮ?"#~UYoo?___zӺ?<.x뗭D'dZ ?|<::3C?y5x{t=fX{=fЇm*ة=ΘϘ޵EfLӨ ,{\S|g諾}WgtU7ػn^g=:ϞT1Úɳ\|^ g g.% ?I`I(n[HF4Ow5`mh6Hdrr)k5BmG=J˝pdrZ-U7NC' |vJ ,j{~@Y۫Qd|д'͠ĖzڼM_$gVKmqW.R7 z]U[il0嫗ͧp0] {E$KLDM:5/sЦ:`{Cxn~ؑOc3Azpoz> -T!{G,krG*y^7fYm>ßS+1 <թ{Qsx܉ ޖn$-ZRI_p| d\d2 jJمv,%NT c`TRKYDVF60r礏Ja#V9tRΈ6ID6H*$NjErZ]IEVD$kWzZ4_n^uĦMd(0hc n,nLzwZxBkX^/n0<: pg'a곦\mNTiY>n|(L^2|4HYoryhLpY}a|9fKI?S!3ʓ{+|&+OOu:RZaVaЇ*_H(mi'W6W]}zJLRU`#4Y^V]n/ܟ"wU9\{`*gAi7/߷v+U#Y2N&/ };RߥkL8KOO])!hMÜ5d{"{sjjcۀٕcgWF>Ϯq-+-Ob?|6-Ǔ+.>-,Q*"8<.b6%pэ=WajSQSĝU#P WMPj+iMu%&ǩ*k d,!_kB5{afZ/n9zQ^wYY_g_Ƚ-ꌬwR!0Mx^sxd<-/q+X,q+_!&pJNM)rv_`h~M$ɇ`|4CFZ'NJ 'Ɯ W(KlLGUkcPBePvM*IPMuF)oT`_HL5#USQˆRG]">q).~1oO˖]]1Y./lV[,D;;atjbVԛN?8=?w˷?*Kq6#9a.\eˇ>~p0~!Oxu5M^DZ\xΎ4W>~9[W %gy [ra iN _;Yv\Ke)7!Of@(42*ܶ:)esp Uv9 F9c8)9Zv$ $b7[N&V!p6|@W()(` QM5Ecf#Fu i)jx,8KݮNGҊ$<I&r2Ȯq: Iba,S%tH:tT~7 捵zaKvCu];2.c>Ϝ˂+8]="";ɇ]g<E>U\EVrzSQI_0*^h-1( A;ce&r+-Z#E*B5U8h:4`fV^j,.% O %DjFu5&@ຯz\v"T*]jXn2Fe0SQ e*A/TJ%Ǔ|9% !ڭI ;,\3ѝ }Վ }*Ʉ6LmXVU[M"F5j6aAF be‘iנ`{2ZƦT+rn^Yre~m{߄X,ȈqLJ@C` |,Hhҕ,E ^Áբ9ox48JF1džuD\:t,9DybAݷS;Vvw#%N'WBL÷lPrćoq=pL&SOq<6{yH/9víj%,)OSF9+RkYj*3D8UQ#%0R\\+`H䒋T%QĘ\ƚTKBrt߶*2 ssIHåҝn%$I;)5X\zTTJ%)PdS#$;PGy"FUd =z]JɴЀ\Ջ3S>]WXďjqSTZ8Ps޽BŬ5ɀБrR͈ʮTA25R'C3t^FǹCw)0kbx}9ǿynw|{'-1oy شu6>Ԯ5cJFQ&}ǚ k{zztUOF/}_ITSČЖGrwsKq͒to99B@blNf:#v8tH Ya_i3y7B BZJ,T3ʍ8v|(OCq5LE^oOԆméĽk9T+]w6զ~BmEzMclΣ&h}ub2qvWn!.=쯮ú uʋ3Qm)Yk& sɬ ) +`]s$4W.:UOA8Us׺ Ӻu Ku:u;.BSx֭s\փ|*SO9:G//6WN}ˋ?.g-  )Um kЇs?( kjI+nOSgJק׭DW,+c J"䊐SJk[ae9e4.?/odd=<^ _i))xkIvѥHTj`6:f'š2+~/{s0A Z64$8c`7wXߙ@}Fs }A!Y`sʶo]E߉ju!|Nh9ԹG{9μG+Ժ|8(2sTj`ܽmɻ-!RD\G R:QH5xYp},4WN:%ɯԻ֍ȂAt}Gv< eZ/2u^I.Ӻ!߸v)'qZ7)XNw4nGEL5ͺuZiАo\Et*+)7˾Bep(,y-C|DC"41D+OmL\N"!Cݘ 1R1poI1,yFԬTICD3 ]3l UTű xp ys)Fж5-C 
փgZ Cڡx,5, Hh)c,X(ڠ s V +bX:Պ=.VajD2\E\VA|}!з},IsY\~7\Z9߬G==y|3 ND*zn/z'W׫- WO۔VF^L+L'e!X^/ӜV,UydKwR{NciҗnszDsy f=\8+ s%.RNޑ7#.Wk_%j l2eSdTӡxxU _S~D,kת1jyyo96h eZm7w)y=a\5f0\Oxo9Y3eFmoAOo|Maù٠k2zm&Q"f@mevDf_9 {j' ,A=e1#KwrSXGuÚdo6Gv/94(JvF4\PRQ3dgqp~b,aԪY0klʞz-o]Ue(:j;ŐZ2E]Ƴxq[1?xQˋVјx˄U,$ZPFfaHjm]5S O0"I35?NRcfvz\l_~g]$',^$Z\,&'3Szǜ'} j9H.|TR1}Bn͑J v dgeځ3ePy&TR{}BwίS{u?uK⎟W]^xJ6b ve22A01"\XxNda wa/3w|1TF{ڵW҇۸cǎ؛G?q1+oRsqu_nX`/+i)¦K$<4SAFys#Ǔw,K6eNx>d˯,x33c&b^/p' +8%܋=L~IjdՇԲI8[`/K⑅*0ڳP_,qd)Kɲ˫/v 1-!_KQyHHjlh4S#S s&{DJB,Ve O"$9#Dވyc4c" 1%Cz6>PzhJ}\ B1y.D*n;"]Ԧ~8яA^(*?!J?@5AOK& He'A > CJ+&Kn Ơ?7swjVb3BbA~ hσQ0ZQJ;ziݧIGcѕ/(pBuMBWېa{C:JtEs,f"=ޡ/KO9+C>X6$+eꗹB<˺T6gا,%ah/Lt9p_i.K.~m LHJqd6ɝ<*M"nK5 S{N{ةD%Y|7gʘOj><= /8Vtl L*-;ۏ_)ߧ~E\dAhQ+?դ-~>?]~> ZoO.1`S+Cy59ՏTPаg< OP&W J(Iz>2 DGHoOك»oP@N|*)SnEGL K*<)%D!:I>zk>Ps,SHyaB`1s`О1(dX4R#Hdt.^ 'mfp]")GF(nGBRwEp܁UR*8m"T=R?c̣̅:)D>FSGy^>66@@q6 N(뭳Dkmk>G@FU{Y=V۬3vk$1K|RimR:bYT` !2PXUZXz=g=*R[t dAYR5L(GIΈs!xtKkd We3+硣GAHp&FŤF^p8ЏùxCՂ} H( ۆqɂ4h+Fg 0J<w56tJ:Q?((h; 4$uFSJ -ZFeVhd` u]` |?5թޑ F6᝴2M\ۑn4(unxNOMFQAYnx'I hYxz)h٨GA5'g@fRH7{]gٽI\M?6jЫu^BҍxT611ca5^8Y< +B!9;5B'F\ VԂBT(%*6&>:>x]ZdTo5^CMY;VPgP //*(y4 W@îsjҰbj)֬o40\XL gv-b@- E 0j@jFBhɽ㠬T9pbbqbjPjZGC-cֱ]5 A+ZRATPZssE)ЂMdPG!6 4ĚfPΡk5jK %'_~Nn@h|PJsTU2mATCch*CZ,C" ^:Xrs y!A}‹zj6 Բ oUž/#˨(J5 9^ ӷg[q3q~Yg7wWn1~v]?̀}01X;Vs_Q6̹'w.OXS?ƴ8ZBdZn۟ҳx+\;\D[ɔYvc.ڭ- RD;h#O+m;nY[hɸ=?n\[[NwnǻEVn˻[LAbi 4@x6-x6eų b,~[LlrO#Bfzs7ey/[xT@Ae^W;δm磿M}L?^#? u7#pv9߼d̨Y$ 9]!(J˄߿8㺫~ĭ<'%Ap1秳?-d= Yq㾼mSɍ_b1T~۳hXdLϵ2zi pj4'Jpr,}FM%A}4k JZU + Oxbn;g/ !3`z`8كE/{%b?>+,O?|8;5{J E'%5wIBo-K @A}p{-\ %MompZQT릩2E2sS\Q%oC.BZ؀8zV]f?/6a½5åP ޻'Zlٛ3$ʯ}_&ŕ/oӷ}D1ުKXM`W緻_:y[+$1oeYm!16Yy`WbYl LT"??/ep ^$<ݧcMUcxK2ʨMڪ{Pas:W)!(adrw&&9)0UrzoueIݒqK"CVNDjMN8Bѓ<.kx~",?VcTn])8Lwғ<'t S&"&D0HWO $3{l>L K߂O˧GDEI~եҥpz~1`꼾˂1:< YN$d`J7)+/LGı[Ĭe_:yL͂wќWkX΄aSScYa/B݂~Ŵ5VSB|`hq@ FXڣ1lsK J2ʟDڪ1]K{+豿_>XO&-ʨb.ڸTIat H_o>?c%R"RtC]l;žϷ /#4?3ߝd:/]مgwJ%rb14pq6g.'blYOs|#Ԝn׊m[/WC9Mz0qjKlmKn]T7UII3x.թ 1F"9Z ㄺ"G*#Ä́PLP3а8G[DrGx/HB8҄B8=1ҢAAC+9ԚjP idU %ڽe!RDQ5)Ys˵5HoҵZ0v]LS+[sG<ʌIa eB9"oB͡]!q&h[4@,Z 'a,'A8jAQ`M xcjid.7n{8 $!?s>$p+VHgZzg0 ;Ws+-9 xt58ox$vxDL.H_<,wL;,N-H_QA:$[A ՌoSz8]Z1(6h7p8untl9.a*QIR~ LRΛ2hqђֻq׎z5TZVi.1ABGEӓUv3K^듂oH,a7.kZMֆa ޲e)U/ezNmiG[Մ]0JB+ +HEah]Vs|T`<ư@L;3iA(0FH ݄ikBV"(#Tߦ5nI񎯏2$&Eњܝ~V䡊N*7.2-;zWubcM6R|h$7l0OGWI8}™^^>M; ,kdN.\/pG`FoS~8[^`*;$6(\?,\'TSzS\8}^)0I]{۰&>mzC] ozk W=}Լb,DgDO.BR[4cY fiIAǐJ4ueR˞IHjwcׅ]s}:5omcrmj-t8NsQ={;wL=(2I@d *y rxp6qQ҄C+^ %9'7)UqY<ێ=&x%|9h9hDs`@'/'$>F, I>. ?rMϱF𦿞dV өG2J1/#''0Tb(\Fus2*LjbYgs8ݟd8|1(\N'70+` x3Y -rX oX LN|_k}(zf# 4V2m(+/}eĵ~yc׌SCsq'cM4mnz:Ekf `Bq+̕yȕ;!L)'NQrBu#-h7*6(nm/fM)3A9J]}FKjxjҭW2T_1ʀ`ٻFndW?bW2YUY l+k>=x$YRݤ(db}bX9h iʷO3 = hA ΀,Ի#';?&AT'զVz 5Q& :Y3,KJh_\\dX+2d]2BlH-y彃dI-{oF9%aN/5ϵ7@ TR_mJMRJJY)pPvDltGAUUu֋)UL"/զ+K ,8Z)Lѕ!{HP[qh8aid ; s; ?8{M2 cϋ+)]VpSTjH+љTxzu Z]]I 1z!! 
k5ai~ǝv>i~8ZSa?t z'G]|!5V4L~~}Aw]|~A;{[6A )֫ {)+3 ^bk#>1 c+fӻM`PdZs2RI9Q7Sn }IܐW?)8':uY91sd]242N]{|y+!;y–}u;:'ṏYm #1dR'5d <|9@hb|1/ߑ'>1X2rMөS Q^1iJWw:ɻ~b.R+e1r,FLɒ D-L<*"!a5 DL:QNBסF4!/L 1*4PFh, 9q#FX󪪵2XvR#j.#wTvB.)Fr Jm'!31 jJO4*#T̍ۺP)D}01 !w $y:(iV<*wKvujsc7D0{8č|]'N 9K/тgA ST< V :Rk`u7Tl:.[gw1H8?tc wn5]+ K}{exP_z%3YwD*ztYE+e!FODDh"$z.xІ&Hx~0%tWa}&:zA`l<$DxP-H;)jL9BI 4}tJס Σ48װ {"̟f<ϵyOM9ܬw3 :@ .t$7$)T̔FrLh{u`@QHWdDN(GlX@rQ8t5i0.LƤzR+LnW+IKOn-K-j)`kM;vUgt S׍DL7xiRG=#% ~F-1=F;|4m+4MS;B3,kN9}hMUxJ O_ n,faUTT٬-P|vV'2dzN9b&LҐ/{n dPd$K?0G4D0lFӰ' H$N<D@&3@7K;w }RqSаܺW0 30]K[J `һQzmG=8F]d*.F j}6FR5G麒* Y1VK²`R xJ5v*Qgud+{2`:%򢩫0#9 XE2AaǨy;CjBJkF#Hqgg5IA-_-RjpdaFBҖ3Lȕ >HxX³cTZn{)0HcZ ͞o{k -k_ Cb>s$}(%e,xjѦZ~t7nįGٌ$BzLZ8sg /Xn˻.oXL>l#}\VV5jSj ³ eVV ZSбn睃BOH ·~bMSOvXs4D\) 1 ȅ'ܸ^S5]T.!e\$u yaOz56G.<18 O/sn2+p!F/B?D21MW&ai<ѕ"Z{7?/Nl\6/ko5 9VΫjɈH&!do7IFW0!G\nMz"N܌j(sy/զč[2| Du&>@Z_t=Vzncgƨ1dZ\y.pˆ O{JESzzƷ6mm15Xq%bf_r&]nەz28i@_y%ױ~i֫a|[\v_:|#܀̱g61ӽqOUbL6+ʔJ(ݶ?Ɨ#r"0_ĥ ǹ9o*FQxQ[l+L{~áS^yrѧ;6&/MS촦p(&VS!{FZ!ʯD F ȈƼ?TO}ÁMbqϽQ i]_xJtN7w<m[5 ݧOQ|m HA4ZscXԡf6TɀL"vOVXP 5?}QxPmMPt0%)aձ áTPb9T ҃dB:' 1Vj^Dw nKս)n8E}(MiCn|XT &Ⱦ8iQLjSDVC{\ 7}C8U Nbb0;ƆV/ ZDѷ Q1  zG27 _uz{.ҞE]2UÿO֭vf4|Hmx/n4_ef52?<}g׍O뺉ifjHܲѼjk#c|d1i~o<+ IL'I06gf:Ū=|m 1n&tVOMP+=G+EǥruJ+'զԈ VzVTMXGm9pM_vW^|W0{Njd]_O}2_9lMͬy n~/ӕ&MU|\sTؠƼ.1E^ Of_.?}ٌ oo 9 %6aV ^MPYy] -}IF։1Uڿ˰4%,ka*VRR^KL8h(j]ZHdhsQ#565ʬȂgJMQji wI})ć8YrVOiѦEZ} g%bwJY)QVYc]T9,هipL pʑٗd;E}^eI}Ʀ~R f+)34P T~fMI#y8xN^%c4VIW"ϣ6I|4\$^H"Q(IEJ)ГЙ"vbj9?IpsJLu]9qVe)AsA4E!Ib(,iԾRFkʞXewg*?V)?W]!R =gMIv 2}wtxjb.:Z>QCRBt _^ibl8(&;K+M|J{;[!/ 0>|3dWC-O}fݾ-C9QxeޑU$6a 5ZOGy@L*۝ى_Y/ )h+vN`rq2ۄ@L*kvawwg:EȬ9ʷ~6n$sCǤ^FqeIXA)Oa.Fj:Tϡ#s?_X2蠼'0&$ I4} ;@iC˔}.bމ'#ӱqҒ,M(rT\[y7F&էiQ}=^ۿV5uںˀQ XgQ!RK\cg-q-cH">=S: #0T}`H EJ%_H=INz#@Hh2Afa4=Dddԍ\}*|z"{Scm<+yHe*}[ RZ@vZ3 CO³cXS$gʚȍ_a%גB+JUVZ$c3("98F-ii׊H7i~-}]l5[-3aQ &)o;ֽer*uҗI'9pz`8=?}{:N2*2>'mQh[0f+t4fǬ-`iZ9mabY8醺ғN7dfݾii{twﳢٛw?ĖǙ:)r!LǒG0ZT-_lʓߘNe>BLMYF-A`+!*, v12BH1| /x3!a>d?jLwd3vR0!0#L/bϴGGY%Q!$Zg3ӔWysa,&=EA\ ӏ΍n[(G8+G#T#*)Eϟ(/z~EJtJ(5aVzVb>fG社-JMLj^fĮfgRܬ40!z+h+u 9笕;aI}[OzT~Js#%|⧡Iݮ56UfTR+S@c+Xߠ**0ڋ _7ftDuIF%4yȱ_c1g-K[ہE+`.:tP0uE1Vj|sNj8z9R~KLeOY|5+î%21j D!V! Љu:D~pY 1l^RRZ),7v&-ilD1 X@L3\nCdt^Hȹ>. %uw:"( Iu9I$!$Wz\.%<Ӊ:]'rY\k-֠3uo?6{~/]-Hoz҉Ayu7CƶmY۾}u b0 aaBkKAh:$Ƙ 0MpdX(šZJ?RdAFLf^BfPY![.6cY>KSI\`FFJx}9*9Ѕ;B0FBiHpNuhfۀDY]lѓ>Uyf۪V[E"(%zmGǀ92G(!W~G_/ +QB@.`ج|č=@9gO# S27 ^=qAzŀSh"tD]~^ۢf DMDbl͜J=Vf8KYRz:@k Jk($fEHB_$H|B-6I@$HsBX,JR ILo35j&|}Oj㯟ټTq1]{pz(FԲl^ۊcK_g{F .J0 aO"5؄mu ɸiܪl5)kΎ> '_:8G'^ K.?Ml'҆0z׾WP ?$D))=~(Tw:A.:fsAp8L1:va>wmǨ#D1#PgT먰A,t*5 "_L8g?"fC1sŜF I%HBqD96#(AE*D$H1PI)bzDF칙B:~*`h*6AI0\`TcebINbb҈saAQq.x̻\rC?0/}Ն$ҍ/xs5x6y;6inxhɨcL]qM22wto)9җlG2GRК, J lM 50KG Ȗ5k1?~lPP36ղ< K=(T J݋{A O.Rg.pFf t 3(W'$*"qMװhb0PLFfU` #◮Aw-iG` Q`fq:?{L& ֚4/ӌ[?Ձ#N'LWi`9vTŎ~d7Y^ԑ EFv'r ]zfeR_f?n讘 `FcDh5Dzp5f1e&,=^ܳ+}ndfi+Xŭs:,&TL,wTPP/'sݫ5?* ;*" +׽[LnVoTAPᚲ)4._" Δ=zZ3\gʹ$O+=lـU@i`i=ϮlE>TtE|QXi͹qtQG)b?D1P!yqCkۃڵr٫QL]ѷcS$FK#񄒴퍲KzD0%)٘aEt ?kjOy13}e p}4E ”tؤrW6 ]( '4ޤlh# $I !ܬ+ &CK"B<|-laKpo% +:)s`z 57ifC9N5Dwm"Aֻu$mgNH:etlX4UoyZ aN17S3dӎT] qϽ $|.#o?I2AQĒn\qٹB ^{iw8BEF`g ~n O߁\(;\X-_:u! #(@!S4b,!a!!Q  RF\!%~[UFT#['b ! %$ %'ֈ*ё $L0͂ XUQ)eK4Pw%aL @U9}AB#_ %Cʁ<۳T SVn1+Jc3#'5 IxBGHJ! 
A߷Le*iyap3DrX 9i;eOb5c(۷8SļrZߛS衎h/>8/ Pל%Hun} s.׭G7$g5n.GStf*٣xÏoBX(,1 eCa:܎7Š!<VH΍y\] zkt~Y;Es#"09NM{PrcִG3Fze#.&3aom!/KS=s cܽ'7Oh{\l ִ̫ꕙ!>V?0BF?`:XE _?ff6C2 o7LJ\Q@DSɱ"$8aacX!C`$C > ◶|дIh`X| Kɶ>po L!3˅m/Z<⳥Nv&ս^I0X/ \_7ӫ3G&]G~2k0Ig-xo Mž#:}p#HZ /xSjH$nqA!еR#3(y>(:f70ӖXx<<ES| TѓCybd H#t9ew1BBQn'M049a4b0 A*]zy?f-m$Մ7IڽIeu-EJ#e^N ;rAHnSm"o!` 9r"ҡBVko]#1CT]" i~+_7ͺƳo-N |«uXoWb^drR pNN:ξhTQe8¡]yj5o Seє(c$}d/> $E1G>N҃@_`~;tc0B(5Ɨ+S5=ֿ6bERCjmagZ4N8>Ll }̥~?ty7wD[!t)UATre;ľ c]϶>FA#,~(8qyw5|"ua Ie$%/KNY BA'P`90 2eP3Y( g G}@rH\jARd:EQPIȤHhܤ(}v?:{d^ nZlќ c#vReR|pJn*SX_{{84R2zzOGtάc}DJÊSjR.d4 2\P0W,##<;YIiC\x6.YyJ~f8ZmIn6Inr41 X=5CKX!bN/8Y+LDZc>A@9?RE?г~Ւm_8]gyV?D9nMԯ7\i EdmK4&[* ?=TC(h G\{L\:8脸0[# ])$<v{H_ڶk58,}#A9ڗ_Mw>Fg϶F ;` 9tAI~Z8ypRWErc ~:M~6pOjPLkHrH0B'#2!Aqva+{!Y7Gzyr7go!s}dU)/EoZPk})zץͰ5N#% {Dci7Q|C⊬WL4.buUb>"rC=J20FAR6 oMpTW"dWeF2/ %4π$\&f )d"2Ɉ(HeT0%N %rokv75Օ /7\Ckb+f+qwkRuT^}<|_:0lkee!6 qb P?ކ_og_L0\ 6ff̍sX̿LS?Uf 7uQc]YItvO+4/gwy#ɾ|(˪6 ޖ@\Y[ivCO]/&; o|x܃sYRE~Kf* KɏB{-w\F=?ό*A?v$hI'eZIiN$*T)ӢL@iEc:e r HdIԭ.xSzaf G\1ZKROi?Z;Vqr22w&H_hA)!*It3&@s%@E9-y. ԒP%RE9-Vȅ:Nj#7L$Zf5IsRSe.9#LaiV4!S/*j]V[dY2JٟI2'Nhn2kR iJMz: ?OԠj4m;??:k|"R7戢ҳR*ݬVѕEWR*ݬ.{+=k+f@-ޔ(tM>>o+uu#c L4M֙, `VČB$ (dDfyB5~&5:J#[SI=ޔzP[8vR/ uVO@%vV* dUȥ@Sڼs:&v[XC.}۰n5Jʔ#XmmTPFCbf:B 6zJyk D 1\YqP0E{5e"F~U1j] =&(u |xίut̮QA;Ԭiր& ˽e76Ʋ]ܻiߞQk?>tn_\btҞ Q2GD'/fn$4I{"Nph ]@;xpݸ4suCNlj.̦F~i ޭIwwNn&4H t1z˧+#8QyBz,1> ?V, &j5֒`VW-FKtّbN=U?~uxՀƎ̩ 0|jG,%r傃x$EUd 2IKr䒗%,JjiA'P`90 2eH*R,4O[Tde!a^ m;6c4_BoO^ }g{];;IK?_3ѕ~γ}:Nɪ]re3ifvS{Cnad'B'#Yp]˜|>|c|1<Es%( r>%XюfmeaFlIar3WELgs*%YG^T[3EfnGBb[2@;! ,nHs$vZb6)defפqug*uTC bjh Z(&g<sZm̶W a?q3 zeHKnFW>pҀ6F4 4yuHSpm_fS+1ߧ yMk@*{ڊ} C}sBXޣa3n:HMiiOXW-zY|ªOPB) 5Gl$8zm,7X6gӻړ!0}BNc8D[;#gǎA#0 Q'W~v;5"DMPRI\exMA?LEc=(g2\ _ʍljetQ 1zZuv\͍ۖ0Ih <+@ǖE I"$$I($Dgԩ!YkX :M*TIyVp] b`|uy˫bUF>oوh!|=~|`L嬨>urmWpH谾@̟o^^ l~+G}C@ͭ=~!FRTXWK=#H0p% v_on̽fV(~JI/_7]wMMJM +nHyED-UDoJ ZDYMAwXX)JI=ޔ Ξc uJAYi%5Hӹ fX+QPxz)5WsR#q"GY+EyM+y[)7+`TJI=ޔDOzVʸ*ǧRݬ-w&z+=o+fIKAYi%QT8ƥTSZpK+g}tV$N+KoJ vaohTY)oXVJVRsдҳR nV #EqJjR.ܬWoHQX)nVZI qy[[dW*[_jVJ˂JR+Sغ.&ƶ-iԭ^ms( ) -n4.F^g[z6(hO‰##DAWNN5NԱ# 7bj4ROe1ܖRy>joq0Νۯ^%KZ\wnc!J+8q"-ǭ㜝RppJӞ& bpJl)j͎E6 qq=N}'8 0;&tWN,ӣ$93rXKs48O2+Ƕ}ބiWnZ_lI{ՓBv|ثt{F8y}jIPZK'ݐCo봡Q0"J_\Eg5h-DZ%\Ocl{(LZ?7vZ|n%bQh};覷_hĀ>|h(fj`?1t[߰lViP-)+Z3.Ǐ]=zCC}9yS5&'L';B'S(FC+yA0ݶYy?g3YYHRDvw F(Ғ S!/LPf lQ x󓟏|E@^OF PKL B@|umz6nv9ᾗa Pq,,zswB!Ϋĝ$24i9O{-p0e QJS-VFK/#;|徴ۻvK%Qo'+PrbE eXn\i+7Ùp8=s_g5%mm5bb2=6ok:GBOH( }6g!Ct Mk* $oZ`04Ɲ!}a.PJ;f fJy[|TJI$=RiSܴr밙l "ʸp P㢘s#Sn:8q6aDqI4OtIT\Ȃ@aLV߷ I422^L[>/TaUBxkYgTHyܙU8 mA2Z><[@n0MhO@6mHkN"M[F )1\ Ka:)T8TXㄣmrr# QJH8Uv *:y< Ҕ94N"@#Nld :MbU$N}]*imb)w]J%B%;@ XT1Uc+Ȕ8?6UHk E,dbUDJU2!côy,]V{⥺]'a~ NxZ#T#cTfj*4I 11TjqGA:*m%n/79.ln%z"09#VKx_N2`>* Z%gp@3GD%g[I2 DxrƓpJA4bf7 tm_lE;draVv9.]|RPq``>_O@&ƨLTVFl]4~uX+"xKd2ѝzECo/_yU}1X# S+' /$Xw0k, (Ä Z}!zhI> 1bZeGXkim$E6MFj散H@hm?Mo4鶵 {LSUcBN+җYN*MÞsU"Ir,Bӗ,vz-EaM2_r?7LtdtUm9ll @EaL(vg<|HTg}ݏlWׁW#fsw=Vq`m =֠'V=@Ԫhe=@ׯZϧ?P(&Pڥt\O8JSbשq:8>8@@~CzCy :_tM:lMа#ڛβ#Ċ(zG UА߲ jN]N ӇkTK%R]p/^C:Fܟq|dbQC휴"i}]{WT;tz2σ;:Ȏ*.ajvzϼVЉ}|Mѓisb;Ûӕu߱m}O<@Eo)GowfR3Xߜܜ/;@~ (2?)kHB^֐)Utjwڭ&AS.SͺnCC[EL !{qj7ڭ.!SU1wPљv'P5!!/\DdJʔ D*Xz^ 8uKap?{ WQ@D|9|;{u;^^"Vޘ< \P4?גQ)vtBnF>hF_kT!?,9n'kzY8\'io5eE!FR>pwn+d;|>ehG.[Ggva-s >=,ݛxZ%ZGX'JHK+\=\Y3%t_#&@=9|Cºe1[r$JQIdGh$L*Fz#gx_c)\Ɛт!['P-9y"x+d+E'1/[^ Fi\C) xqvpVj+UgT7O)_%h5^&\JQ Jٗ/פI-t<*7G@r&J 3ܚ9*i8U:Ma&QT "5,J4T@Cw9)A=۶.ԀPHiNA %tuw@dBcX$2JO0V"NP=AQ+hf-)͵~^E44DD$@0 Th wW 5jLICkQkfNm.h.FfH7p;Npа_"2 |ρBhK^ ;B_oT Dr&HGcHfT2[<ׂx _ˢ'1!,ڗPql N39 '"%:ZEp!D/PyhջsЦޜüd>PՃ/R'ie yŜmu%$}ܤ98ߵ҂n-\ߥ̿?U@{V߯?>sAd5}< ɏߎ ݺF#===^ &4Bi.H*"ϣbfDA i-FJa g!;\?.WHVNdP] 
Vy[[`ߪ.;2Ij<\:~ؖB9#H`๝a)m$n#, Q+MiũP:vRi+@woEܹL4\DZq Ml$$@A)KA$# -I*잋Fɲ{1YuUp>у)1 Yby)H)1QŐ09ɹCM cV24E:Ff;{8J=r$Bv}]%h,!Kbž hy\2 TeDW&@іr vF#Ĉo2*e}J^(-R&lbFnQ 't'Qj C%2H:(跱ir! .`ž8,ėMqm3+Wԡs#Quվ.4r+رޛQV,zz,ށ≿Ƿ#]P՛ǏSﳽAϳ^\O;t=\Q4aD記I'z=D6/Vx2˄MwI\I=,u)e_\b3si1_A~9X~bh ZmXS`F 0A7G]W~C&%rC!Kelg4;V> GgrHδ{J.}+xJN ݖ>(}W S .;ŕ J|ƽ9!.? 8A n,MFծc"r"O~ui6Y3GcT! m ص l[W%j(k N0&HKyp6\%:|Tje@[Ȋ@;_vڶ7;GTIgO)~G r᪅p9FJT9 ,!>.À Xəc7O;e4*y Htl2fOdO)~x$mbktw"oU9Trqpl.:u$aSo$Davd?kz0ŽjhdmSh ҥL6MP3<iQIHl  DEұR)jkgFW!W7 غw'^!I3og5 nB_398h6*&͆mūWO-sW@d?q=#`KZPoϽ|~ ֵ-MRt碼*͊5nxϔЖ/ssqp|ӤQݠԕGt/JZYv] ֡v)MUаo`ێa[pFLjF~R=I}nT۬>8j#8T+n@]߰@H_YLZu]mQ-% `~*t2xXf!{?{;ګTOËSfd?r? ?xrُ/zS=O ;IDuQ k[7ˎ/\z7nQ`e~x֐p#Sp9vzGQ Vѩ*xPib[ELT h7F Gnu1諸ݎAB#[ڭ y"%S$KM6D+,UW ]/|8)s.' U@ ?+knIPuD}hG{OQ$e[1}J)UJqXHd~_V֑YXW=j&SIâٳގTlt>/nZ=3r9޼x~5&WW_}U#8czӉ+K&'@ı%xM9k:A$|F`Q@Uӽ7 JY qMxiBVqPb 0 ]Czb7KM #.Rf$ @.5 z@Y)>1»*RNmۖZ|"؄|I=W5JX$`I a!oD}l !wYpؘ;\V3S DcͿnw a!oDl ֯8_x7 iwt|ι,8&V-qݲ)« R**oWʍYuЀQ͵ge5MR` XM|{C^.֛Tםiޭ]aZ;hB)1 s;d9>,jcJўw<%|G%IlNSW+ff[jBͥgi7SRn. Ӊ-I9| 1增>sY碖O7nʻj( 2'䙋u:G%(x]+ܕ媞 ajܛE4]ݠC_ Q @i4'8F"eXPr)0@<<15~Mĉ4o#>]j!pOZ!<jDC^<1z%0g&gG.dY9K$Ĕ:CB7BZ  BkcuVn`N@:DmeEkE*YE/~bJ[YLpPcV5M5Nl<4Q|9؋PH>WC?9?ĺU5hʮA>zܞxi3z/xۇM>l94v$'{YA템Ak^qo۷(vJ%Pw(XnMp(8n.e\>8ͻ(IަQ~pڟ|S{XG- x)841UXeg0M!c{\?8sW7Ŝ]E'yZ.Or" {_1W$i%IH9)նZV[iVjqp(1@kfnkE i=buYa_y6J ŠdDeyJR9XP04Sq^3i$"/9ϿeN 'CX-^$zjSUuDJfcxn?{w[hf?maDOx.3?,cF4nVGyA^LPiE լwi~ZgRa%䤂rr>@<)iX@>JYC"{qZ*QsdpNRɂR)%`_JU%%ե0 UK7?q^t^opt^} @Rw1E΋A=l/ ǀ50RӘ~jfL $}d \f\arߥ}08"KT'DR79ambMqX OhOn˺Kvk/“<-lMs ÓsZκtC tŮZ`۳E).mt3W+8RjF͞^ֆl܊(A蠲Z zk磜"B R+jb2]\RFMp|j4Y ZЫv8UکDh6c\~7b1SuUr?̗֓x:4ԃC^!OQjaq\K.n /+R\ݫxˆLg'D起)G0QЂhG4{YeZMILvB=Wy5?!~j ;DͺF QQ"H$TD f'/j数H3b5U. 6\JYg2Bv_SK. dkg2%w+sUO XeMR yī]^aUVHv*֠E4/,vK 6{va4ۍbǒRL\&*? (V Oh,BX`wwMewMc7p@}FfnaY'd'g▛!aX6۸y}iOflأqʿ/taG`$#ovgDB@ rqmvX!yy mƠpÐ?̼]0 5sWwjQQPtj}!5&7';l-59l~ D*gRmKWҋRmvVV 1IZaۖJAJ/J/+jbK_ZHMP;+%JJI}-5 wVzVJ-g|Zz +3~!5f݌V*W2 (zgyg* c%D]=O?aDM PM׽oRM St !pm9Q  ,Բ5(p415;#+]WdE;4}WN^Pp:e/şY)tE` m/tJ FDНB~Sah!s#eYE`G$gcd@qEDAF+Ekm/Y~N[1I&QsLv|xWF#;hOcYGS6!qMu0M2$na&Pܢg۶tPXhnb R; 7݄T]2Y

k7Z=Ku}HcXg l[8X8eC;/=GAv?ݸ'-!򌊅G!IP;qO5Nۃ;r&օofܨwtQNHwzcڐ!^7\Z{k¶IsSDŻCh}J^;(A{RFw?[J0K%؀^ T:ܙ/u;RDì7[=ˆP&"E4"HGyNoq0Zre Wwo!<}2Hp+م 1.]xYn6V$kn[j:Ye[lX@Ʊld:)UvgE$gb2ڞ=I*!/9FEsTv:`]o#GW9IS"=n$/Y]֖3Y?VK[dW[-Ԫf,YEcמ&ԛUa7&ŦBL)=\9Bt67I1T ڍ+q ql-GǥOƸw'y8{/xmF._ϗhE/( Td/?gGeFO/[8ڂ䰣w<) `K>o]dnI] n \ln`~ \5.a%.VU o&Uo2W?ݟ֒nf%3w\_\iS#_(QOlJunI(,nbLȓ'Y#OF45r1^X1x0zm49jB [ $!UMl5[͏5Nj> izw}hzz Ǚ^XTf;GӸ3y:|E; (K=CYˆ$TL+b4OQt>X@T9a1vs=C;hl CtO Aٝt+l& J\_rw{|ۡ <"z}δP8U(LbX.NBQrBs H Kh&Dܼt՜ZLV~mLb^dm]KU$ox}{M\lYp݈jyМE"ZBp1=Ehj7@+rY׾߭p[4a$M.ɠ@+߮}e7ʷ g7 weW9 ]A&ؓ|WQk9 8y7jd<ɪyU󤩚M\$N TQAkQ@DŒ.>u!MTb\ˮ/k[kLT4@z6pD>i`hEQE~$qNl"n#n7/勻"= mٕw!0_$]~v]ب ٨'kd b6)h#b樵p2(UL@ZV e/w5vBVy1 8< \Q%Ҩ8Q]m$R6T#wjlO0v>9vdˆlPP V-_qi HeVۉ0!՜k*tn m ,gk&A.C^=jLg@a9V2sFIπxv`PlDU l 2zpTb:ڠS8 A!6Nb9hwt6k }8 =PA4Y=BĮ1Y@4HT(?,цwq/ HP/^M_Ef(0PŻ k+.͡]a{eP.߲boqBFqn. qL&&LX< ō +-RJ;:Hi~>WцdV-DQɐ|l%)ژd r@ew.[WO8n- 8 ,^n|;*7aIAIjaLE^hH%CR&T,0E\]w(&6LaO  ]e%^#c^)u<מ"૊hd#6B6ʼaPH/LRr9W8`5zA4YyRnTȘ|ˮ9Z.uHZӪEf\M4ܱYQ|ۭC@Ǟ*ъhȚ p3󷞉1L<$Y;=ޖܜ/ъؼr8E|4txlpLBySj3]@MZI2!:;޲ϟ\D-7C͹ Lx[&z]um,dNP:on>i Az[q`}m;-N~i k39Z h_,"=krN<4UP DIHs-rDLU1z)"TZT!hى5PZwv}=j1`fg#\Z=1r=TRX\'p%`<*gtVGaZH#mAqL1wIϷEVTބ#}㒴VRNWquׁjdU(WRhX}ZZfZHowc<]_%|@kjM@ž8ٟI#p,\?N?wGǫw4D; e?"fv0T`Sv* 2 u*wK"^Fw'kSs5D9όIC޸vҩFCY7uʃ6mLiI|֭DK[UN!Z7 qZG֕%mio֭DK[Ut{Njޟ呴*WM@$-cPL}P2Cz!ю-i dPI/(3>DUq[Q#;fƮe`2/d&~`WܮX;;FRZpZ!J 0&'>T ,I\5I+oE+z]3(pZi#?Z+Yp.Ԟ[2np~n$AvZOC"?Τ0N x 8IJ-pEEπiGep蕒k, xri<6eeM^u>PK5miτs^"uzŝܸ=mKNV} d96;}<[T * h)-e[ I"ooc$c$Flk#O6DcPǔᤸv9Z_޶L1{ˠΘCQ5z_u. -Y`QΥ녶<x/rPk6[t}'P9/FZe6u^k]Ћ`˼%|[SQ}i{qF-J?ʃhôdw sА7t wUoȃ҅hU]?@3p5 JkzА7t =G֍3`ݺDuuc9oݵuV[hА7+:% uJP<͘T==? Z2TP8EM{Rg(N/f:r$>WoJ8Td/J}yPO.**4Oy;9yk̉lnvvu;x_~>:ͮ?|MˏsάaZfjDgJT^t{M4vL۬g/gyvSǣ;9w\˲D" Z`̌ jČ#/BIeyAl6r$!D, , w/VvL]wXh|pn|d*|K&o W!6 g(i/SBrmyuQ}5QGCseRG B (P̤`H}(=|G"";[2@IqYp^WIfv]q Vbm GgXPm- 96pZ}\ҙsڲ@a[RvL'm'u1j[!mØSJ{&PJ9UR1g$?fIR-'֢UAղ#HcX6XUV˒aE:ThM \W"$P},s Ҫ `b)~&:j飱E/'ܵ<١vzPCXPÇpE(%c=iX=b5o}_~' Jgu6V``q aؒ^j"9bsN ,71| ާmȑ@>!"䕇hBL'ʹgnN3b|iKm얾,0 !loׁ]7Wc(]=\kqD6IAhq$GWő8/i¿67{v~ua3~vҝƿ;i+>Dٻێw7y+/ G7+ǝ7`d>"!qw'3_" $1f^Ib!/O{ePXDQUNIZ־8Sk|L؇ R]n$>ZEqCRNaQGҰ(gyjbQzQ 2,JAfhH,"JAEic8vo|.m]¢u8f_F.;֥Xsgs]PX}޵Z?xXoF%̞k5*<֥âT7h>Z\zQ+< HaQXLKOX@eX6V -1J8J ٨\zdʣMSm=_O?cyRcZ:+y\*HW OdzS9҈nAŸewS5 )C4}"Y{EBºIp2Ba _XMKDߟIrV3Q*3:=wqJ'g:hK-j#*2,weVVUYa) RUfWRhw`\,d Rh-"OP&Zc\ yDj`q՝o,GgLpp4=*O-܏fR:H}Gp O*t)%8xT 9.)uq %L)"偓G ~m gr>[})!7l'guxqo#m:9",$(6[ .E`F"#z3&toP'ҋ(A_vFAoMw׿.o`Evr󩵪Eyyz7sS q;>G̀d|lU\G/m!sb"3Ќ1_z|}TG|f 1X8w5zuay+jGrfS\}&Ûa@q8d8z$;=ya{R ' ia0*Z "xdž!ϫGHfk7W|ܾih }7gږk|qajkVwγۏVo/~|Z/;we[KHϝ%1~d]ou!ཞritzclmʓw'R@Zl' uo׀郘n Oj\tZ5@˃_5x3@h10.dk擌~=z!M0>`!+'%PޕV`G*DB,G_=yQ݌}*& ^혳*Z[{[2 o,u=Z ⽟Q;qgW69>Y;:"B^yĔrӓ솺E3gvK (tf+O[6@M)byR+'ٍ%dT BB'1mY=]~60 !AhO0`A%-m_BO9t Sg**&$˩4NZnB,aҖ7\J_.h3^ rょ"WkFKKx `9 q>z"eb؝MU?~pPǼr=C%NȸH&c@Cªp$k,敵Zd+І(+[s2taY u:8I.{|5&g Z %Elue]mIT䕱du e  yxHe9H1sV תܠ2RL3̵RԬD͔-J7FA/7& #ʸ?^Fo,,皻vh_ʺK$i 5T`ӅdB 5"1JQJ[;2#`C@ ' "9o\^LWf*I07~"S'^!O D'}xzSͅTâ!=El jXd{C#&9P d-P`v"B^y)b:>&;le3 {5Va_oAU&D%$w~^HŞsI&lqŸd%eSZ 38eFw'3ΜJSOpHdC {Q2Hr{ 2hTIuZ=)k| yÇs hR l:lH##uW=|{RvS!hM VTUT6E+&%QijE+dalw[9.PlEU BT4Y[(!u5}3:RʺyDPR-k˄ ɊhwU@J~tB2 a@fmvskv]l J=W+g7WfvY@ Dƒz!IÚsRIB4XU;ؒi]m*]xTco3\\2̕4#iBF1xA W ^Fotc6YJ Vkes[k4*[ZJQjjq&bm,"cFIIkLojL]MfRS2)\+aL8 AЦVdnXR Bڡh S[ TdQwM918!6M4AS6h$4SiI3ve{[ÆSӾY/x{{;~[/#ݸݧo?}leIʙіHcKOW61v52 /,A3~]ݣ@x fD2Ff:M-<<:lh ; W4H^9;d2sp{vi+lrݿ{֡QW x{xKKY][oF+_N&V}10x&&y ^=ȶ"sYMJ6)SRl XvYUV==n}vWo:8dۼpYw&|8fji7SmP[ >K\sӄ%9pT{kӢ\/FTl:1'ԑt8-BuLKNۮ0{sQsA,dK} !\kb/!mSc yΪ 5RHˑHG eXً̗ f^ji̢ԝCZK֟/vsԕ#b`CMu*`LQ@LQ.,7.6E19w&+ 
Ŏh9@Y>;ڲ|:oD;g'|dB݈ޭ+6Mۘ!pޭ{揥wsa!߸)@Ipiw|ӦZOzE,HAM#Ifs"<˞u,դ$R3"'ڧRf0ƈ/1W1[)Wzw1G~&XD&l{{W(ݧ&B7Zr%9 kU_ Zϔ%Ӓ{&IikW}_㒖(HlXQLlT{X6Br̫Je2a0JpPS%GXa{| Ki$v]~ J ݬ!jCu?F_LsZ) \k߫jV}!-]#`MZ\b8!mQeǖ6$Cp$6wN 9:WpTwp[ ؍ Jo ;?j`Ȓ̥? (=du>ngp`981;RŦVDXq);7|1-:AJ nA X0JCO[Я%&`"^顖x,S4!ODD[GH}  $z(NR2M#%HĂT!DzDX 9䬱U~E6|*H9eiJ%T\j6֛lCiEOo5ӓm4GӗfkEmqMmMzV^/./|WR(yYG4T&FYcN e|S/2R뒃[FH H>USXrc `2 ^X:dM !Ȕ£&)M%ţ6lJ,7WP!1hmRF$#пwO{9CavZ|{'|;1Gq7c]ֻ ¢><_,aɆ7]DDmQ)224D|;1M!`J҉ꡚ[Ɖ %xhPTS&hD8$šP*q f8I$ "nS"M@/%GERVI![+yS=;, J; Oh/XL$]WT{{c` cU}[')g.[#"#{ߺv鉉^ B=GV0ZY%~ߚ SC+> fB{U(<§;f"on_M|EDb?6^j_9F)fOg!mF?T! wGLʋg\u[x:`^KlNyD9mAyAR Ϋˮq6۷}i~9(ĸ;f-W80i9]bW_>6\$A2@ahG' RAox~*E$.ap߬2ms/S {Daj=чcG|1X[[AJ f LvOnj%@{j{dND全$]Ve|&on@kT tmv | c2I;W|$07"z0'ۋdGYNCyoL!I8|AնwDxzPIz؊ >Pj~m0kkpGfTJݏ?m/5UdK@I8>8@^z:jE Ip੐­,u71LVe0U: S(emLڵn N鐘ػs{c)ʿe&q.bػP^Sƈr3?occɪ8\cY-b$,8_./TǑ~B5wloVAOijiC^3E6{DuO7z8j٘/FoAmㄺ\T XMVzVJJ)LRIX);+ͥfdgmY)>S\vlޕZ*N\'cЀi#t8`yH)Xe^]pV݋s^(/ bJ`Pl}WxQ%nVl;Q" Xgu w*.·ALƫgř=KBbC_wa'Ī(EReZD,"Lq!ر Ān ) zscc͑d18XY#+ )DKyR>w@*= azb*qL%XhXТKR $' 3%1 T%Kr Y2f*'˼ #5TγNJ)7V NJ)\jUΉI;+eҐ7.OJ\j*a:g+%Wji+K}+5%hRLc)X!wֆ'+=o+V}IMš# oǚyM,㆒r.zy#Rxv;2Seۼa?H-ky7]<Z_~0/k.__|]}]pW6_L[OWq{h]Av=}K ]|<7uu.?gX2UaJNjϳކօ dԝGWS }L2t`R1L۟}_A{nQw@ ]w)\'ҝWMQ7H,t8 [{!_8.13RS";OjId0f< _C>݋Q'uiL).jv|mM3DkZϽZh& E5M"sD)mÜӞĚ_n86'>wNyUo5Ժ8ڣl dCƷ Ŏo/̾^?fq˛#%;6H?a 7~Q\^&{8M5y|=}ejNO'=W&cOFڽwZ& l}ڄݵH%<hd餫I ԨTz#}++!冈Ĉ1$Hh8 98A""\X"1O--8DHeGׅQ#|"p17Pn&&)1G t(&*&iJ- ;EW5ȸrC|'}t37Fsl:JmQ4 Xű$b,!0# <&+4@k%Ohlc" a2u`(yi>iY=h)DevPȮå+ztM@NDyqY<݈yH[el,{f2X3%EӬC}ugw"psV5OAm7b?u_#HəBY_ܫ!u=a~LhV |߫qm-jf~'Bj{82o-Q0BWj;k՗w)v_I)GDVZ$Q 81[Z#ImLiȈƢ("PT5-sV n״x`(Tܘ^hh(ϕbk|iTa! h\cʸ7SiM)@qbkd1Ӟrih)d#wQ%BU"wފřȜ;pπ_wdyq>Nm]r]NKAy1Zft{6nn[\ jeqw>Nf:݄5f1ω3-xhaG3bJ׭ =kFӐ39ü5^sYO_rAmٖox%HY݄ED2Xމ aOv ̳Zpi߱µJIKxb &cq X5yLl#ڄ4cҴRV{jځx>^;ЋQKsOE)qԕ- 9"8)mل'V7GR<0'SBq+XRXXxc@;.dkD&鴅sr=eǰC*ѧSޡ%B.%QϚϟohMMX[@3BŖI%bCDnǍĩ[ETE T `?  qi BQ CA$ბ:BGJJiP. R 1"4lz6u46ǯx uOH#P3{j~y 5C2xo߽[x3awkrXV is\~4~uU]~uxߴ{%=>;o-v&\ߗ/=y.k\k⡁Yn h.ߵnC.O]JlUY x$KyY}n\D`f[bZW-ٙpt)dh]p~ !Zh\Cd{# ~sm-HjqDT+I9=jz5.iBy赩v2\]wg7eZ;^'ߒN-ذoxVyJ9Rc'WS}YOX3 oPLKc V# nT{(̯&BHVc`| {M.] Q긿F/nuhtj}ƢZlry;;jiھuͽu.Kh)P,MXB99KHq4d}SJ0Nvw*@ВO+sr33dޭ鍖 6vXn=Y#I*y?z]Vl)xF(&qd\&iF MyeCeЕ }pHQyUͽ(٢0և;|\!LK%j:* x $ Oo()Be)I q'T' TZ;I_}\4x(VQ}Y("Nz˫j8#^J"ē^P*(a) >ZӫzNi.;YDhDqa9hc0"<"TDE#<POiNĐFsecƭ;$%!@XF5P. 6<1Kbh,!Q"a(Y)a^P̛;*,䞃T<Ѩ"s%-|uETc ;RERc C`Bh'IF*\2,Q R 'W]3I)9ypeTsƞ*9ූJB FORzRb0pVljdR #f +%anj] S׎Lq {NWC2FfqQa/dF#M@Uʞup͞5~Ga' k5Zvg-.G*hG Ms}۫," NxMN]E(j(MGwpJcv98 QԈFQir@X$9 (eX;$/7Bc\ZUY$FJ ɽ{+iv7'^_6iT"ES~f`M0+Z0{2 eBd@_o*d5R19N3S!7a2Ef fIlXg\ MeTjr$Q>$KjsE/\tn ]cu,# rh_njj1O(]{D$#ò !ߵDLG"hF+3\oLIBbT(puki3;QǗyvJQ:{QRPIi.` `xw51C-o% cHfW{^& ܛ% ܠR"|)',`æ8]]co=h8]+ 740,MLncJCF``[B 대)uE'ϔcҕN78zbQFF\jVߘsUiUz.uQ(FF/Ռ`pL4H F0cFg0S9ۡˁ]th 8wQC7S(L4=40ayh`孔yBLq \hQ̷G +%9;40g;-k'…v' $\x\fl7}<}յ 2y4Cun7e䭐o4N`9%%>R50sxaTy,T=X;5ό/5qP=j"T(&c 0֋lJʚpVSc< & Lv)u$0ag K=H `ṦJ|؎P~[xmh5`\ j~FȰE"k&؉l.FW!~:3渂+T jx!Iw1F]@8fx_nmP~~#9"i-6Gb{6v# i異\( "XoW/P1;V)-޳N9fÇӵ/-o= „DWh?uSku]Ix*=3_^Q*. I[#%1ô٣c$]oUytHp~2pq祦DS8Hb3D$ZLHc ϸF85>P"$=:<$'BD#-[vĠ}B14pUq"Sz! ~%ޅR05~P%_J%l@t 1ȺLtv<¿? 
YNϋ7o?"0%]TPZ8xdד%ݸaIϔPt/' 9pRU ū$qGhz͠lD Tf7I,(Wސ.)75yF9oR%eOeR#Oe˙DqVBoM ՠ{*1"^OLYrOO/UièO2b%QڑF8/UÇ:]vU {~|Yq\ѧpg4Et +ήqO,p5PTSDv'N6^O]YdwtL2  +s*g+ l]Մjj+μh]qa#UxΩ8FW A܉)p#s%U+:P]/YB,+;$étHZ-4R=n9M'/ŰMc?+Vi'C:!T'˺A+RV8o /J-IsI)A&"?][]6*>ߴVTl+El SKQ%Z,6ȳv~4{Xm0ttbH]JH OJgV&3QICd~2p%&wmNf^uF>Nlg+5gj@t?tF;#}X\,Ƽs=^ BRI^t4N4`MFY`y}eht*T(ä-&\]>NQMgx.LvFXG&m/ND.`T KaiGnEܹiRvu?tKGpME AXE9$=5($6UҝAJDc (膢m"Қ e, enh qCYYRahfL!ARxkA"kA˘ZZZ'e \a-0koF);3;?i\`'Ci"ч/""YDVTˁD`b )Z,ݗ\Ŵ'vɚ}퐰U)N+m,zVrK"8IѸmm.Vn|G=0SDJlW]7~XUD"i)QJ"cEݹEA2Wgn뼳b#R⍣xbj%S{ɸʂD0Jc Aɍ{Z*gu,x)A+lT*6T"Zx.V3 J[ :Tڨ !f.9HKIbx] ww K\AAΣ!Qwn~[kvxM߷%|_ou/5J#:,Jjw^_J$5y}.pp4(N-NkTrQW )s\yONQ8tTmQHX,P C-~&I&&^{EGz]}o;kLiUg]-.ƍ+G51Uӏ7mk|Lkӫ+GK7tdGa4ٰݗ%6qKޞNVf/.h/o> zOtUçW7^,윽 b7_5$bq翞ir+~'~ԿDrSG\F"ZP;WXhAJikY~ד<_G:ްPCJxiNȞٸsHV T+l]Z c0wi07T2,EVzєM@L4T|9At/"7/.GzUu/?%9O}{|fOi7͋Aai?_LcBFS+F&"BK"p0Bt[ Jg!:+4b0@9^R{*UjSNq G]JfHT4@(iΩ;_Ev'i\VrMP:.ᮖ_j߾WTcR| 'LnZ`x5g"KeAhκ+?~|u ﮚƖDkU{GRFVhW|q=,2xGDu~zDb;_M&xmC$XGoO46vSFQj= a0%!)~ "w0&T;4q VR*=lvv!Y|:$jVSiav10ӗگUf#g,U)7șs<(j*;x.DЬ%Ͻg+5UV:0VքegKXJVR;:T.DkM UShKE-a*1LEa gL]]WR´Orgf/Ɠ6HBC֌CLs.6gT@A4뺧ɛyO_CFN; dĸ3lZ'+NEa2:li̇ej#ѫX5We:_u(̦G*(I99=XвUp. 3*\եowX%g}S.ێ-~,uy{{y*4kA)‰BE@;ye\v^Oe%@qh@y"Z+Ξ1'hAXRO k*P[eApEŭ8 fD"mZR?{WF T˩%3$}IA"%Ԑe"=HYlRZ@S^WgfShqW,fH2JcZCX5Wf;vwѧw]aN8+O@wJO|%QyRlH6Ѵ)EA o}K瑪EC#wne,APj A2H&^=!^ǰ5fKuaȬv8sFOxdjϔ\0qCz2 X3иi#1E'mnqzXgSrh6B~tĴ2 TD[H?q5>ȑN&fQ`#e]n;7LHC\(. FjDCI`}4JGLX-%K`Me.a7[λߙkJ`-DߣḿJ?i!thT?K y;!PPj:)EPǾPWkɩq4y 8kpufY +d5H2є_Um꺽6b@$_*Pu# kn SЂaH/dG2' V‰[nc0nA$]?J8&-7HK5ܜ+9طjw҈V:PX<ׁDތΖ}Glnwc߮Y*JKV'C+@5su=c3Lڽ5`b–u0ǐXH<\@I~ꪮїSZ39t\[ϛlK{ 8[o?}Wzv{Mn/ߞx?}X__onr+@NP0M[{iݩlB\vfbF# (a{w!kW|[C*noPZi7E0n:9|۷?zeI`}TC$ԛB~>Ҍ֦:} vew M~[ϼ=YM(ҲlAךQTC $.U%вϋD+~ZDvϕ{~B8mիF=Oq*R|CB?711 zBP@i-}j )iO׻;TJ欭 9oOBHw"4#&F"VpűܵeCTJEy?-#5䍑}}EI',1:Kxv{~ptAĉL+d<8BNA]f.G!)Դ\3T!"D;'BEm6}È2H/!ai8A hɉ= 4#* "5TTO*;f#d!e GtaEj)G ġ7O<(钠k12h.ÒeնP)k hd $fw pX)Vh11odN".?4".DI̩d[ fY D?M-".9=/y$UӀq $1EDM,OeL ɹx"d搽Ge 8`NA[lDq^L-Z]#tBfWBfa>CCh]Z*դ!؃ 2. `ڞc0D8\*0FPrGuyO<3LUAդsk cH^VI]ܰ4C526%"=ॷïr%ecgt5GjOT6Yn<{> Y%!QrB.'xgC3S;or>Cd]fŽkn$& L&1es_E'߮j5+s+ŠGkDf-;cԫF% ,  (%V2|=] Zvw-XGAi~1r}M^æ7Ȧ/hT}$YQN.~gMKzFɠ WU$jY\&"^iF2,P讣R@ a9ؓMŖ$_^´7;!R=#ZJ& ΀ØUYzJ$42Jp$\b6J/.=|9#f-eO_tD Sp ADN8$1YphG 8}儑u&tJDT6X(1DHMfMF y$%͈noCR[!YuLEnVR&櫪yrA` Qz*%|j$"A vI49ԞD55^ȡf3<-6Ydi-Y諫JS?f4aՐ[uD\q2JΙ+)2F{wOܦ3 B7|Q BV%|3{7{Tjud7]SĮs6. " R9f2RI иgiPN|uJPT/iL_ȗϋo3 )i,єMKtk%`mym g dVYj ۘsSvHRբb|Z.^s܊܊H1+TOF6]Ũ{ KPM ˴Ft$1OQUP@a4k;Fol?IM 5qytf/::Hc0A~:{P8`N3/8cr&,w^EtE#PvJ`-Ȝ0C(`,yu`;3@ )o6&.*~hm_Uڃ͇f\/Ǖ:(:29FiT*KB砍 6J]OEH׹ۿ1ʆ#2' dO.wjF1Q1z")gDXR3] 2цK tO[ʎ8c7Y9Utm \!EnQ2&2-g}kݩb&oՅ߂RuJLYRo{L-(,[n뤿 0lu\MB"|gY 8o؅hj`"R-٥;U'oWgʲ)xjc\1 J(cykDC5j fo*e0@̍_g6 7z(CMd{kYm*SBwViNHC._:kbl,v\0˻O~g3gr `+zk "b萫MȢhS=*dӑ_n+)TvV$cKI*GA/d'>#ER_ B# ^G<?Sf Y)PAGG𢷡>}>ݠ Ű1 vb6D4Ů_V//@S#w=NՔzO;G KFl0Se3Aom[VktT{@<jc5ѓ}eu2ءY75wnx &r*+SoqiQ{$cDk>QjVh nz>dX(\(r  o*s,MG jVI|8szXWǨd H-Lgu~s;B-jԢ~@*P%Q*m<}DVB`}4JRb$G ,,WȜGov94Y}y(- $\,iWn{n v<+PrAa5ߠ K>ΌO9۷hk.!?{Hrz/^/ xoGEv wGRY(EVQ\VX_~<,Oj Y6Ydw3(5*.ĊT#πIDQȘ`Y+umȚ?M}3d?0] +i ;g473@Ȥ)]esπ.RKSaBKziqoU`"0~絾y*|q3,/fXc?bnƿ٪=?&̲?<pvNy}!5_ٗ/𛼢`??(IR7IsiV}U~E+4W0a7A( 琘ThaT7!aa>0kɓ[#$7ъ$chQ&22B}ȵ FN֊bΠW-'=$=I rd`j0,'հhȀK3H-(f<g(LKVIQ7cᚈދg4~90 k٨VbltL̟skhޞ@5~'Um{k?#}8kț""d+IOd4(JhVAқҊ\a'$3->)VRi%VWUI*91&zsώSkQ^o3+:hۋ.d`tAǜF=nhJXO8:l8ad'uhYى5άv|%+- "'f9X78yn`ʲTYrX .u DU$ArSMc<2rDn/}x{~X&^ͳ;G _ɹ/E=e* rn#0, #E{ⲂF.k47>,~?;,E6{)v~ŹPRaȂ}-dWS hS]})*. 
I+X]& >g91c @6ˁ%*G&BD}%m$GMǥӇAg<*uet& nWu|#qTW\YF{PfK2R+M1"%NXtC$n8D VP\Ts(jp匭u]J$zp1X9rڶx?9J,35$tc,/;ɚ{v)o9!DW9p>HRDnX4`h5ADG]a(5&21 |&@Z)mHeQ *DuD;NYuKEfQx&e d3Īx2Z:zB)Qu4]@19B2%/ ]F&v˾£*q5k8t&i8XBVA['JUrf}m/(Ս)w<d72~zSPNRp.C&NU6h)!|IX;mo;({$;:!:pB(p8հ*?vc9g0bxm4]9"r6*_@"Ke +1{6H i;, rHu>`B(}vInIԘ (&wBXd vg wXgpQX5Vp"ZQdދ)>hyG9s%'G^`';ld56ֺXyb.ejDA9h$Tfi!y> E ·ϵ"R6d` ́`ѻԆ GuN&;'o:-#~ouJ FHMu#{!+p"JUV P>> DmIIgEՀY6_n48b+pRx`;mmH& Y=B~8Wܳ=Z KyhʪKdW\%v>kn"`]ʹԓ=!`!ou.\;! CiiՖZ2f ^T)sC|gqvx;vB޵uڑei0Z>{:GyB$Z"WNGmPZn3w'HI"&LUɅFTc _]p-Фs*e:7ċRs=jbxauY&RbQZ9dAZt[)4~MisݾԔ-Ybu+T-PX1$] [X Lxl'B%BXlޚyL:f?QMm^QSPA!ii-Δ܎J-WY@3AICh|9/Wt 8JG@ρ$mmjNA1-~]IkXDs8oL<*2?Ww8~x;>\ I'(@R Z.-RWBh!u''W֑k~_êLfV„ʦx_kktxc糚kaORgp)m#gxiTmI{kYTQg,8蠍 }ϡCGh\;@z{(1˘q*s0wwØ |N<'4lNB{cX;l܎R^Kd H0F-,K^ -Gfhnsᗛ!jSw]ǞS֓Af]W pU-3CP<^GViYfc6lp,SoF9ks59ݙIKnEJ'u٬3X4}8[ y7eJo#d\Io@e˳sd(pi ގcǠgD޹w^Ruh8i i'5狶4\$1:u?̲رK*8T7}X92%&ڳ5L-5̞0+V-q(a0<Ժ9hR|RB}]ɑܲ/[6ؾ [6|-j/PjK$͝|th`} /HM՗yț+&F։ .Y}ʞR9eI|_nO6/rsz"_(p'ͳ#M;|iZQV9KNڵ[7UrD[:nu#C޻]*0 S2TgBTAlO~C}4!M j5ɴL9xr,3q3TE'juúj?ӌwS=f_N2&UپҗDžE"]oQ2:'a4OڗXvO9F4 Py,CFGm_둁>ʧ4ÚSmT{Yw#*A6 DTN47~ˁ!NA<5lW{\(I#r݉"u92W'Fi,֎&.m5 TЉ~LNkto ְśG6 8;-  giͽw%*쵙OLi%ÿ?]a&_p9\o.P=dG,}32*= 9'ډ»?V՟8صQ,/?_y2JMʐmb^@T#"CN9ckG}37צr[-ΐg,39 4⤷V^Se$#]mrĭ@ Q3|k9'  |A a~ʋ֒"Aj1TI` e".L~Icb@cjMt0Gۯ5@QJ1vZ4migK (lvX]}5F.R$bg,..~lKL!;,E85DQ 9tBnM"|!t%9S6ASM FAr ecBȞ`w|A=8ډK5͵F/R}"{՟ǘʬ)ƛ$F2h痽>H_hXܮr.N1}nVCroݐ zvCz G#j&ʔIDR#޻H7g5 G$ԦB! mz.E-j/_w|8ކn*TI MT!U7Á4yA%2tF͊{Ba۶} Ci=yqy .V'2#rտaS>C=Oz 圬 [n|7/EW-v' 7|W!AܭMl3WyCf-]t)( % w:g9xp[ w_>~Y6o>}]ɓ۴Iy^96ۃ$Ҩ"tv=+SS_3\h`E>Yto|~]4([)"i-:7hz6.bΕqڠw?iyW_e銡o'˳eCQɪA,\IJGYxZ{Zc+4o`H<ۃT\u cIlkP<؄9~sfp?ߌl+Y{c{VzTj'Me벂jQ%Iq% &f#3[YSC&4nzu]ԡLI>aG#E@y?&_t Zyge)>szǏE] 8;P8wXN0o1?-Cafw؇w@Nks$+,#89ƶ!$f E,t}5RZ6D]L`JXZrL &^ħn$3׬ySr&1B`o/~|(2razӴBGBY%Qg|m.nه~Jy!.ފm:r4&5 O8:0Y:z8=Ħ$d$s5[*y{Ti\3%qvykG Xc܌s[h&L^b|ըmhGd_DKe -Dׁt "x(=V5z){!+0zzgP7QRF b ?nL̊xpq.@GLʊ\ 7q`+6݇υgLGAԿ(QW*>X..OdG[L(ߙ~_\l ! |x]\ܴ,OO{l5cLE1'Iu:#P~~v'Ύ;.836Ab0^OP4OM:kڙVCRMb7^\A5Mkj2,;pt:LHFJ~%Eضxph]Aw;_׌ؾځMbXDI76bZ*&*Ic`Sq!KApyFX[x+ \y`<>&#^-1hJ&Z;4 d0I8Dt^q{ѝ]">͞jimJ.Chk)ې%&u6-m&aMV+iݝn~H vm*3+/]o!KGo4;*N6*hTԵVzaR$vNv}K jJbtl ܐmN_Á㭛12 yE WKbk̴v*y++3& {tφQ|OJ&Ѫ~\$_ʎԦDhJ؃֍kfr˫f .lm ZMd!bX^!$M%l%j}b/z;[UˠRBˢUCP1 R{C7 ѫ Nx/?:蟕7WY¤sRSy XN6Hl0e8e*2{*hxN!2H౮(T4Tw~-`}' bC*= ]V5I7^\)rq_ۺ3 ᳷o p>Cr@rap/}~ﴁz2Z')ӓa W/3t͎RtwZ(6ԇ$k U^lF=`Ĩ>lz0u(I5֍*@lJRS"Mk@b` Ze巻Aί_9n ~$YZQNu٘VbRѡ'`6Ybgf[ﳬc9gݬݳ=(|B@$BuS7V>PHaj$`΂ug9BS^Z*'*#wj.y+E:d>1O>at%nk2Ͼr> $X%.--)(H{vZSCH%.vp.zeNI R.YҡW *ex0OZ}AL ߺ^WnV2rh) w2UmKYoשeK"`Ćp<Bd׼߾9}{ Nz9o1]Y/K˓}ҦנHmJr\*R_{񆈪{w/usO%'BM8p{9ffjBWҤ%T$kQ^֒z\3;xX\¹V)}"? ?͟7Vr.x>dO~^&vE:@6T(N"PbG؎pEc(\+hov..̒*iFP4ѸV yiEGw ^nK; RS,9zQi}cROg֒Ӭ>fy؉Oo9ub~Av G =zDkU&IXOɆ% E3v м (RQNz12Xw #dlG0tlwct{ sG5`Ar]W0}AEOY):$Mxިo-7]^f hV7aA e8vABd纟>HJuŎDxz [Vh-5Pf6ri0@`5qK|^\izy3D&<$Y*)*()~T۹ub(X?ᘂGtO+E&c##BO1XfIn RIkUIV`U)d .K#QON0Wu}m~J8VpᦙNCXɉڽ &wnD7snF-Ή)f=0,)aGP.$2LTCd d d{0ݒ5Z4aA\"Q~\+n<_3x:9vZilmLSEP-4PVM˧'3^2eNE` 1IN%.АF=}Kr442BD< Hj'v}B3ztB Kr(&q'h9',V/_}SC y7aTh O[f?]#@z(ԒDΉSoMϫEvيxYşg?zz0Qۛ[h<:<1vn6wx^P- lFbW<ˏG<>sYLu|WB~s#b:' #H 6ef)h_+/:a6Jrx&s[RI8=<`= Y-|խopT>ܡ48Axuzcˋ\ܼe׍Nʧ,6l޻:<NvyrB?3vBm$d?.\C.Fg(ý08C(Q kF#.e"+F12ھs hy: G'OM0Ȑl&7gwB-{^lcaub/QjT rI_kuLd4@ PNtٚm\LJHU Yq"քlmΌ c-HGjAtXY wv̳^F kW?GOeɟ/g^}9GgDm&-:}?c^aLrWҎ~8ZHo~8n7>]Ȓ?+P4R(xCx?_W.f,L1[P }(@PV-B4$-g:=BdhxϘʧOaa/_^d X%^MPӤwmǬf(8.iρ,R,DDK&!FDrŽ0_:'ɽZ卶9X΅1yH.ZVMJN\2@qƋҰ-5kEMEX%7b\W^fGtm84ٗX:ǟT`Raqพ&ٙؐVD:ɔd-JҪNVڞ26ӧPi俿v A]h8t?ׂ_ , Q[elSQhܠ.]Fvzys˙naoAƞc/]Jj&c 똟E.ªݶ|Ȩp5zC g0/ rgGP<% Ŀ:GL0xMܩ@N>N1jw6_! 
Ŋ;z뤍hC$F]it1T\V98 3JmUˍJb9aD΃f<K .ϯHgg@8]2ڨ&5PN TRtW3§XHׅfZh}2gN_P5=r7Ȕy))F#we.E9W,r,n,;%^r\ =bYpܰ6 Tb ,TˢYB  Ow@ds1H0ңNy% }RU{j:JЭZif.m&6qIC=소U̠ƈ]9otlGo3z>EU7{nsݜv7 ~!W`;cS&[|>˜644NJ8뜸N%rvXP ;HzWc#1@־;q@}B>ӜFr ˍm !k<\wnF=v@g{GDvܸz٪{DIbMe*a~{B$]|LN:9WS3  >ORE%_g7C5Nnf䦔NDƥrL' Df.(58_\σr˔dZm٭ELjrm[L80Mv'"7vC4roK  'N5ĩYȄx2#9MF}!TwF&` 8;ے÷Cw& lU8QVIIA`5Vd V9hj%/a_Z&=^N6vh(supƘ;~Q(@j "zvo:=pihIYm5Fm7y 6s20ZyCЦTh#)MEr-+&߉+j]+%3 ЏSOFry/9F0 '4&sZQC 2Ef=A#{1̙:}w ٙ/̗XvqO&"oכfB$dAUܖJ".$31n9"W@Ք);C*6mK"@E(Y%6 3m:%cqm׆wƭk㦋AUR7+^I>ma'H/;($;N.dͤ~ai)5=0i]mVclU% f=тGb24`zAEܰ#o۷M!Qs79EV۶|o5˓jz—a<ٔ}Fy{ѐk7CE\11Lw8Lϩa<=eAB٫)퓢5@JC{!RZ}B,օ(i<|j4?5=7f?]#BXcC,ujENY?ϚmeOZEBlFᮎS꟔ࢌ+ Q"χU<)6 Vܙl41r)."njS:6ug>m~~5 tRSQc ퟹځ!1Md~1|ѝ塆[kdI޵4$bӌveGjo/%Wpcl6TWI78[*"2q@q,)"h’ N6m XٚwJ)Mn5-`,aKFE!ȨUwxJ9G8=22#5 D2ƈ$*ƤT@;)ٛ#U?@f#[y|"PyhY騭'2Jz*Q-=&21! N3Ko38X&1Mn0SaJ=f %)ky>Y#{5/Nt9*_Z֗ 긫!a=FP{/qimȘ 0ʃDL""VT2gfnA8ʑv|x)|`'M 碟 EhZ *^̽nDx!*Qf>%vE+xe u!j8 |> iĿgԆTՂsdu-Vc ?lG0HEMrHiOu$IIpy)鈽. Rk )|Z ~ޣA*)L2ĭgn _b[z?Afzlt1+O~{rECidr"F8-:!iMGbGĖ3aHXh= V $Ŭd6rLy2܈u<ǔaS->]+{g2ZCETiTō[o_+|+%n15 R3aCɹ%EB(tX>Yv?p'fy7cjW }|+GTDP"^sOWykn޽%ęG1΅ʬ.6p &lҁƏЕN(ESCIlKB݂` !+ƥEip4[+4$BQP$K,S ":ת 6ܮܡ!47;b"Q[3zޭ1-SLqZ+t6bPEl/a\KI۞ow|8A- [ccnP&n@-Po!"x^J+'ڝs41.YCl8D)LZń[6Kg\QRs [XHKM!`r_iHIΡ&m@l3hh FW;+6H~G6\WR (O]iOXK<]W =uT F dpў6m60hֵ& ŽCǘV# SJtŞx+!㉽F+eZ}>%=k/csUmƯJlB!4wLRKyfEh1 Mtm%1$`i2h67K4Jɭk_mKyvr!fl]z|PSCGmF6kܺJ\j˪gf]( VqΝ^4-tG Q ֽΝ: J^$˺Zޝg"aZkoHR-IRakk~£/R=$w`!h9!t;5_[AH*EgBuBVJ֬UFl ^C/:Tp6 =u{Aװr}:VwKc?ik g)){..*T; j@PA +DV` 3%&3Qe)R9+sh#M|׋E8ݨs ;I̍UliZ`Sm~UPOrpnN(zn׽bP=nEiwoW)\/<4/n68}OȔb2F(o]2Py\+PJCcX 3L9~Qz. E9S˜qGpJL[.O,LE$"!@4WZEd)juͿߗD}G=/y1lϋa{^5l)ہ{)Dz$7+˻Ϝ2*?ۉ[lr{Fc:F&rg)9`v~}$O?qM)L?-_QrY}ĝ]BX|zM\ab/&%-,R9+K&9K@ J;A|8+uo @{r$]$ dS;U]S m M O7|ޅ4#zG,,3m"z@fA6mFrFu{m}O*`Ɋ6]lkj b~#B(\QF8G뒉Ǵ=peJoBy1-"/ϖ{$-ƋtćLh$Ο[OW)hK?~k1M_?rE^)E~:G\gurїLTDB|4WFM2E hDn٨B`d{{$0et˛n>I2^ S!oho*\bH>j^‹K&%H-3f-7́=lea./mX1Ǒ ,2kaH%WiOut+kT$ҘK~fJ=YYuV6*`zh40rТwi^)8E!9:! Sd}ZF[nB<3 9J(:I`@5.[CU4Rj$ ʘ%,po@ҭ)\0|ޅV=2b3DFʄGQͬ ͠23Q%=GB87LHQK-Z: "趠e-'}zu:g<{DsQv2ԅ0Ti5s{s5/,YRZ]ug~-?gOV wҴ|z]N>  ^yֱWWH] .*̱TTB؟^`'G}I3wh.*G2n!:,;?g*/4by?ܶpKu 5;5]u?GVFuM9HIHJGdJ(4zQD+)bah`[܅pNuKP.@ZF3VkA4Xx| -78Ԟ%JMrW*^b@"\~%&@@U{5̂\3nծ3>;c_[qmzMtmap?"7-AqX 6 lWq[W ۊtMrԻ%*;7?}rqz}ur/.}%_:G*fyhp;1W-Q'ixkm85X}2\ujctYy"(hֺv̸bʿg .lC:MzGAR_~o+ʨ"L$8ȆԖ89b}^FhR%^Vc|E~ݺXBN }Q\!AREm0As9kY$XbRyS9@a'KzfNbŘU,r@~-]nku ﹨:j.-_x\iIF@w/i(ʼ7ɗu":NUE٨[h*\Ȍ\ZY$1'/u؛(=X&;Q{A3";8TBd#uz%8ڃ^O[ נ+;a5ZR+),iFL)iҎ;,b;MURben@e2xʄz8UAߪc T& P]"qqbLR2R8q0q-sVD: u ].YfN)ow 7CNxDG$ dMc5ֆcg͒X3GBfgT*) ۸yxLm㜎חCvUUj @Ej\$^E|QBYjzHr%P Y&wўHO i®@ozVA@!|aklN!~gTc X}E`QHVv}?I: >*f%JA@ 0mu&̉2J*[4LrBe"DRpȕdN8j8DjE]C4!$^j㵅Hq#c U2瀎lD5&J++wbH&Z%`mX_-VrFPNKcd,75>Hd :cew֔Ŝȟ24˄VM<%[n29kHp契 5 ,Oo<4CLdC2QEq+e.%]-"\BɘxL[WOE-^!M8j7ێ mɭAa-iު݄b $ͧF[ۤY`e:F5fQ #:Mo&MH*7nqJdV{lm 蘢ǓԠ";ͫm"<[F$DO>.lE8~B*>6MtaFԑ8#-LN㦢"=1JHL͐N52HlƱ4YtuY}{m4h#K`8"%UkcD, DY|:}bT,ɧ>'ֹB'oE4,A3gZ(l fau6C$^BUFc'YvEXYo50H!or3 ,y =s1me%'qkB *UJ6i6WM˵͙ ^S hapqâeww!'XGLuMgkJVvKV[eҼ>i!f$ DR3_DGp\xTz}\%BfgA+t;B`#8S6'ʐ *E2}!KVz;tH(>"$b4vEd4+o5PzlBE'O@ GxF_~X)\C ޞn\U3 R= C kK|q;YX ri"}Ӵ=-$Z`bh>D\! 
:p-$b.6QcbBW3 IZĪe1qh+'^h·;D !c>]^4!3~<-$VGǂP] " kDkKSPsɣQ.BO8(N6h㓪FC e-IC w ej&C+ RW|j!>~cWņbNNU+%SJp sbaX сN.D'kNIԁD- =J[o^8f1 Ljs(DԬLZ&3 3' 'fvsy*1b_^>6s{)$նnDe2Xmslp `-[m8w~_d!'pYKMY1l+7qWqw*R\ZebVƚe&;CZm&"mײk-sZ , ("^i:->Z9ndag IՎs0NHzP4H#}$鴟- 8V Ku0~m} \Hp{"XbXW}6K;^Zem_K$_7݈v9i*ע%8HC"4 +PX ʬXLx܆Ue# ]uB 6y|@h(в_!"/kU{2>9$OtqH=kdᒴ.}ZږpsM[m?dL*3&0Z%u>TӐ,)dEBdtXԆN'9l;N-z[Iޟ^/f]V4@hPuŹ7N7y{|muu3jr9%9\_= .{q}ܼ} t,zUs)̇i3KGu1L|o5e#g0p8tSwkFQ3}$k4.v^eh,1ZVIjEw!W Jlu~,`Z53ڮ>g>D m7Ge}XzN"Pؾxd㢕2Es.:~W;VùfLW4ͼ۰lRJd TLRr`KV1(g|.BZP!9"R6 $9xBd'FsUB*O@=+TB@INPiMfΦ`$Tb ،ʒ^#e QRdQH%<'e6RLY-;} 9"jo$c+jHjoLd"JP |HWw -qmcFaR/67,HfU*iA30T EqB* h DɕQB^m ҲP@.LdǹK7G @&kiru WmC+A=]znUgZڐr"}+اgnI8 wG;LFU9붉f֯keb|kzѶ_HhFjH*y9 I WȍR!`[ܩe5:ĸ9a6&{#8V|w[nXgΌt $6(>>y~~*??;ڸ~uڙ`&}UnV.U.=cT`&m$,8d!Bvt<߲㩿ʳ޷v?N wIѥ ~o$hc4V~?|7Zm@{v^q_xOGxtmyA {{.=[i|=uV0pcGgŐ&%zhG5 rÇFQӲWT%*ú094 5~ܳF5^'->억opn{OT ?6ucڜd ,gcmyWS?r| DDrڀLz[ekbJ $ YzSi8XAf dvFm3BPt4]}N=r&0T@b&zYtR22h]oGW}nH! d"@r68Ł03#k#\r;jMrHg#[#GWWwU* $qWM5X&U<ƂRz8ګ_^3;٧?o;l:Dͽ?O6nځ X֤ٚtDd(H05VibDAͩ)ɐŒ0p'l$b8] ZR2kHE;[S5Qq T5j*gg̉":|kGp2{?fd|/ n%8RcN\(_`Yp5taȊ K0F^iev??M~3ce{;t¡e%â~N/],8nlL^˄{mݝd!ʴ*="}m؄m+ջ n{eI*K||=3H]R)tR?v](Ew3׾{HP>Jaf ɪu<˟n]8$WꕨlҘ#P4s:64ɏΈN\OP6;?݈PE1S#n~S!wO|ʾWԹ8#I$^ .?o]bˁۖ`~~4+ -mr`j>UuƙB""7ٜ.;ޘ6JԷG6^oIt:/CK.-G@Q2@DKE; 6Q="D2t=~9L%9!o-44Ǻ',^ٳQgmցO;3R>P \tVX쎽HX zN([mik@:bCiKϺ^>Ժ(qe] 5JA&ǰK2G;Xl 3R!QQQJɓL."ģ㌑嵓Y04T hˎrV"|hZ_*!y-OZm%~( B8q$O7p ͽ^> {ԅ{k*../ȁd]DL`T}Եc08͌FO~>|r7Ĭ./ԪSxZ OD ORy¥ *T. KD}1<1. ,VXF«C>Z,tyը'BR_3+y+qsBŹ(4G(#[ޏ e%=5Ke eq ΒRT§)#-Xh,Z`\nup}+Mΐaصsl Q4EI(WpcTA3vV@}3*IEAf4lJfDK`׌ A]OAmK~.[:;x~ԗ 傥hcÑBHTscL2gA0|B^n&~ ?T1zf9F5 3Lx}ujuۻiXz{ ުwo"O4,i'}\JVƘǬc$غ6m]l]Fsʸla*nLggM])EJPP@#x`s>ȬʙL(&BΤ2T$M9'uT#J!aZe8w*xMGSw:qIuTaN+Tb놵\"!;DD09!Wb!3!ݟbC/;\ "CqPyD6pZq73P*쑿<_M]'縐vpdʴEHXf sIs[R%Jn\ 4>*XǍKky|'r\7ג)NV{oY˕l,e2[` y{ E&x|X? VY @ aVJD& %/;WpHt\>@HTpnڲ@7k]h,iE[03W ~E54<I,q>$KMn}gd))J4g4˅B5$`H"2b)z#ļ;Vʾ0LK4,y&3-(Zk+LȢ,\PJ)(m >FsgMBr#'uX$qWE~\ŋp j|ͯSf| yB:jD װG?]-pꂀxpzlac>+eQw޷{Ws͔LgT,DLHƔ+{F1uU!⢨{%{^*Ԝ]{iVJJ̕#uE\ȏER%Re V!Ä4-gVrn@it_>_ [|SI+Rn2͕54G Xܢh $`rkȥa4XFhh|&J"pmDFdiq?I`:pnM\.wg .jVr1tH /RUH<U *oGUyU - gn]ÁҁGL`ζ.Qs 6OcǚOӱ/'=8"`>eq!4.igp8./+%,.`)%Y6m>qԩ~:2_E0R/)xI,P&JYvH4%Br T?$M6G* *wH雏,&E%@`).>?ǍoVh>ow"0:s,x%+@(ᯟpɄx{gtItɀHk4 0:r 02nFDB=1²6aJq`&5xjV`*R˟b.<.e1$Ԕj OM ۨ!RMZW6a9N6YӭK|deT(ȬHVF%Q,2Zu$!Tf1&2NudE& ʏ㏝HY zK&PaY`irBMrA+K4Z2RTH\KUf<:U)-YKp_C$0wlVKu+2B>42]%~Nol L^]LYG,^g4igS8/!.ua]Xc1ֈ&Mm2⽶B{֙wv扲դ!_FF.'t DubǨb1L1nϺCbZ:4+W(~uuAĎQźAfݢ nuhWQ:q;MJщi,BT'v*w$\*iMhuCCr)SlLyͥ7aC7!o(-T j9~!m8w'dERyJbIԃ iWl:B` b5Ϯ)"fv#ĺc1 +;Yd㍰IJM,pNLl%;L` '4͹іR̭ֆmׁF?΢aF*F)2WhR5=ż#44: $,`m"mZdHMQuʥ;@#֭U NG=2s8s9pu W87<0cjo>27%Uod^zKK}sLfۥ_o* )fa%"Ha-u8e(?}m(/0g7{o`g2X刃wC'RHe-9ܵ?jzhjήiIY7Ho_ ll{{ mP,$p/8S듢g&{ 4ٓg\V: 2l+|,fR*` u ?F&_?Lk/qi?]Ĉp?0ӟ"cN.@:=WhU`1BT!)BPgZ&}M g&54׹)cD`gyB?P%$$i{B^d8Th"xkz{:Vjߘ&面( =Yi awgxAU.W9+֠RW/D9>p_τP&2k.`vsw77}?G0g'n?).cU'xJjrooXab W8FE1},}dAIV:. #]1q`4. 
]~!sye.12CaQVKEMVz&>dx"TAboX+ ;l;sN޵2N1Gf L'b3Ybd?(cv4tuV wxLuGO8>\nT;\_>^l)XrV3iJcj׆E~}dXA\2U'l_\9 ^/;էYɫnۿ󣐯B^PݼOPTUΔZQ!DUH䑰 W\JY%Բ+5Q1Vչ 1VlfttَE3_27!]U^MƬPW5&&7ZQ5V'o^ G۞A|a;wil$6h?:At0 ʩEN!iul` |3g* 6 f $npU:$u(OF, E)+=4 GmP7eYq8hQ%ֈ '-orÍݻ.kgof!?6Y/p۵y1p G/򆞲t ΖkEv+@*ϡii@YyQڰ {Kh0!hѩB:R%5Zm]844m';Бp@2'H9ՆZ}.9:ihkmͨiYɋti`2KT 82(Vz-/?:huEW7W@.Y!Qo-[/.b@VirTb+V)rPdM(N&IP) ul0OXAGlBy M(Xny@0ÛөAO߀p m,{c:tn.tNfs/t1N3p lA$%}ZOCJ 1TH&`]?;j&330!W]jB!1No3x4?}nEܿm982ub8kb4 pOG>?`=%IHWA:t>zͺAm(bTg;R.)m݆__nch_\E׬*`݆ Fu#źo20uiА)ºrͻbF؈60Қqࢺ דX\P> q[70_"HHOTgU8br/?l OVPF3 4M)~˶J+fVVUZI,Z* `w{3Ynq^ y 5Ժs痗Qo+uW#j&#4]W]xzrn&cx`G6vH]!yBɓC3z}ۤ1(9nx}}Xmm*/E,MIga2s+|9$~oZm#Pl09q[1bѴΧuKuatuZ%HdD&ĚXL;^̣O? R'mzbcT F>fZ#oiّKcӮؘ&L ex ou".Ĩ%F9I\GժWr\M[ .8TLџ~3ǫ՘ /5O^"e lU E|]d[ޱ~05];b䃩__ڏ(yl;=[DŰ_$j?Qk?=bUHFiO-Ѷ6:8h4˧#MrzLay b6WF277U1+a>,.΍xjU?!8Mrs5S4kMd!䊴jn>(Fp_*r:m@ڸ2 95q)M*gT%QAi,P$NVS9_4X_7gh5}fZ#Ѣ4DⱲ}x, MVr0.KJ#NVNK%gQ,q~QÓT)PW$u~ԍ7'9^FW,\w71V5KD[njIbGbhk ~?8BBpv\,8cF#l)U !$nKnG.7QpSD)oMUjhYj)Ej)PMò[ '_.slYì^^-l|z]3q]5X] uEH2R!`Y[>9ԙ\mRRjޱkys.١ y6nz377a6t2q'Sw 2 U . Q8ˬq^oI@PJ=ڊSa0p 5;,ejxSٻ6vDb|ms&!Me(@'4vt;fɖdXmRcH2+U`N-4?\Z4!G D9K*e,M R I9JROJ U9X-;PffyOKs,GuO^Va 'H&n3F* KkFnu9<#(/Tuv}^^ O.o0Gml,PgOs֌ͥ:?l{ճj$Ug|˰NSru ޖ'۲4X[|PQgL2eJۖK? Fb؉!"1;䌅ʩT?6D3-͜(yʛ:f)1tB-RW%TsSSUBlЊ> 6]=^і{K^a1Z|U`>HYI4y?Y𱳀.v?8vFC>2b4ۙd(pĴBJ9?iZ[)MiGHVX~$ݭowcQ*[_"Jٱ\&fEަ<1\9Sm2s<>!U)+4BD"M]`uqiDdBs.7F8rHDm#Ǹm[̺虳"g?œ#wtKǮ@a&)n[ԱIMU v[֎8Z 3tr7zoID_! cfWKfB6{wwc=r)oG-tI2'`∏ך4{ͽw<qO=*ZJt00 %q2)o=jM0O1E5;B&Of틔4=m40]Hɫ)ͤh7FCVJ0VN9gӃ3x^AJɀ^k[)8y4]qлfx:2  N G*@dV;k#;#bSӨ_$LGȱβ"bJxEKxZK؇&V1y?y6EN_/ЊTOePȺp!׵q=#rVCj_H[[[eXgڝ>EY!zKb%./TkYZ^4!~Gtl$ EAor\ʲUv"?1ٚH2dSW( `$YQȹ=A W4)aRQ=g# B,*i _zIh3a5'{}=FLP =x俴DMaW<4# zauvwwO<]˞g|x%/z?o:De4?I)#_~LNٷMAmi4ieuv8lj8eƚEn]quT1B̂a{G/'Oq|#1d=i:6L&9# ~tTIெigQsqq@!ppY%]RвM~sRfTO;V6,00m\PweH`fd#/ \}KIDL52& M#8HI;/m]S6\ʭ!&81Jobq'E73A'ӁbKs{{Gtœ'_&(g:-,ޥ~a}U.)qb-ޙ~iicE+xhVsU+1{ؕ3coIn.rNt+9e/j0ȴ,#'ldfq8擈#Hk,b{ I[@j6WU|i̚XB*<J'OL!G%N@lbP$c$+6"b#3{>&>V鐌eH8[ K;\\moGᢐm@ͧG1Rps)csOĘ|}Ed'fEVPoL5 *87VUؿY'5LmLPBữes :.ۺpcRV0v0h2fQpsi󢷼ܬZjw&V|kEJKV`[bi2yֹ4Ji9Vu}}>!a({LcN2g%$[FCV[ o/|/Zҽ[Z1cK[upq߾Uo "~t|;!D@mUf.ˇ:j)4`́X*6Pzxk|^|fKM,kRBX֜^HzyejEu`%R Mz\bNpmKٰؔ3NLXe@;c8eI?krPdc?X*v 2+;+d}X[K{Vn69; >+d2 =^`qc2e:\m̐SBNÒP'X٥9d}b+*?'dWwFoz^ݦwuR ]<OqZE2z[}YvnϤm/G| t*Z{vc:GM<2mST\ߵu3|K /XVVs !ZOBk1t!!:нe XAh4y,98Rex0Mk;,ܗ[RK12j~m=k41RedB1<j0.s'3J8ԐR ltDg0)pnM+@ͺ`/M ۰jQ]za#eTU~b'^$[H)[75E&j!ݮTUHtċ`o#gsTP7)UnR[{R3TĴ>h; )5kWW+OJ-7EJ.Qۖ<۽ܽȾ.tk`}ZޏE`t29 TIBVk7<[R%M`;vUpDqZFU]H3 UJQ8;tԇoMi4.cAnP vBdWB*c4=]H+;y>nD&a32 JIX+OǮ::ق^=\!$ V ͤQ:#I r:|ǜ^z'HʿE~~k?wD?]J]dJz^_uRtβֹ褩 = \RJQu3QJ.Zm|v&&l/rdB,wmm%,i/U]m "X$y?$eTӖMk$hyhfNꪯR61;F `5x0q _ NmO(@+Hrt"6Bf{1h2 0Vv<?4 tl}NK))`5^~! 'O'N.6bB/ȳY,i;1K,J݄m+x{mZJ4kYl!a;|W :[+x*>Sn!zg{DV#!Xc(~'`lq8DI"PLvgzDb=c!PxT{ow~Xzw%H*Clg"|;+5zz&/G;&@ ;s0&jW 0X$ zJbG a߿$ΉGYPu5~+ՓJG OJ~֙JsCX lDȢw/aĎ_>ޮNH,KHYaɓ+'g~sLK7|u!Wم|d3m/ XQ@g|q6kG/6(mx-eb{Zy$z2ayQʀh5Fs ZE K@y hyn@XSǑ< a^;—[lk=*pYOr<ŰF 2JR8ž/Ri\ZV$z5"ߴq4TOPչq9jIN eh+,kjg4KO2z8IB鍂\C?ò+zDcBK^o6]_1q5Fe0am~ע/̑knfd247uouz_:橅.'!CsݟדN';`7+y|t8eG7>\.NY`EKYq&T:VrZk~]Qv"*h ʊ%cB$a T؇ A+݄w%"' ! \_UWmsȦG+1S=]Bb ({~ކd4o Uhf(6F2~I(5dFȢެ I[hDͿzy& 2:uWW9{_$177Q gĺf/6jUmJmt%S^߷B 77'/'ԧJ7{TDE2yRoltS\{9@.Z#U7㙊|4?Gh5q"D0\+Q|(ˁL֥txATBr]>GZz>3#쏆ju&6I` [^]^s ?6~<(񏍹gQm,"FD\*(qN•kV_e.TXODvLgȩ5X(DEBOccU>&ӈ=jހ nEL<4]eY)q! RwPg (aYIDVUҨ"3LϷ(HTrr(D!/VuCw15CS9\~K^G(@QB*M,緭<96 ͌|$1p*Il:t94n)AC8Qk6]Ey$+uQ1 )xABBjJ/L\.M#5'Wq SVRzv! K1ۧvm`8Ktsޯo<>B吚:i߯7 rTZV^?lfWA)? DB:w<@$S2) /Y1'c!wE˄S^ګ(R!zёƟtx1h=>{i∴u֔ Lzl_h'W',4OW m,8NƐpOD /8~ޓj+G1h&lXkDЮ(@}]x?%d..QnjQB72b{PmyuyS/$᣺Xy ۊ b'+25`M`}vmQ0N. 
6//Z!x1l+SW3tuCr5S֥}RL| &XG,= ++n=<,Џ0R:<=8ԌM"1-2餼u:VyV'V^iRMkYW>\% bS~p^``=*8i a**XgXx\HxXh?MCRB`GlT#4&-YM- P`}ef7TzЃCX1L;*ydAbiVC# @ό5{cg@(t9rKۧ6wMOG}Ӣ3rRo'djF50unFb73s^06&VtZ !6e89E-@`j.Y6oejWUZT[ cIcT~ShW (9 /+)im/ϖ5&)$K9&fzG>wgȂ^ϭ2dQRJtl\7I^$z z\`Cq*pTD[xA84D" ޓ>rA1$lLąr/c5$@Y@rRJ3"l0d䢛듏C:* WUG*j!U-h͌6ƄA i.3sA̤Q "T:}3 tG.j,^Y;e#f9X 4lL-><]A-Q>g7gޚ{^s2<;k쑣 #цyj~>}nmqA^_};t.GqCՈYUbT,W 3>Dx']]2D\R-Io >~]f/-ͬ3*]^R15JH u-;ÙG#Ld+~:'wi@m.*zҲ"yGƪx4pj3'cdlɊS XfQdv~ua`[KĪȽ>yIE q)`U&;k0 GٻFn&W&F2CX/`If;Mm%GG&`[[dխGLFWOu7fs+Zp\ 6dIJ @F{ݹCԧ nЏ wwt+?7d~hz' ' 8ظo'' [ V'uM/d㾷_`fI>QrdtྜV2Uӕ>o[EU|^mɏ]!5.0j.*|—95E 3( r ]3^NT be 0XTJ L0n2\:"L%L-qhW{AyIaZ dJ~(膟x2l\|۟\ogB6z 6fؘR| ")]~2M 3Ho%`,Q2Ȃ/uJO#hlݰCp5ޞ C9hZiu*Np Ȥ39`,G 5nN_tw&iNfj:ޒ.a٢57wte|j4fcKs]!`*+T.h$`va)e7ot löj'qwoΟV"t*@04ƹKG)mGb@ugflj1@UT>D/fn*MԈ33δ`R#/<ЈV$CgE"#*i|l qα➂?m%>QB;'5}eLߥ/LX &}J~@x0]A߯11c{4JoȪ8/ߟ}|cy"FJ 0gt|&,v~'fx 3w zo}l{ jX nsϱo _d|WrvU;4*h 'j@g.$>R+ 易 !L&(7{:xi9MgSe!PyCIvFUF21{;j;(.X@ExŧW =  mۙ7V;/;?gf ~&?{?l:b 4DaàQT(e )9,EigDS4!6&q0bBsEs5yꚯN:+$t6  EÝ܌?Dƀ XMNk2p#6UJ9dsC& v_ԯ^b8_"jӦH`|+Q۫y´P dzgB9`uf) 8vrJxfv Xζ^Bpy'b'HWX`;ҕ6H vk\?^O|$Z>:,]xb!(C6&T"Bx$("EyڗT`W)[V(Sh~aSּȐhM-w!R)2V1rsS` /Rϕ)N=>k%=#G0k)%CPGK Lz@)r`CY(k(ƄqQ~PFM+/P:w1 K' c~ ݻC&\ѽDY Q7o`߄TL:9 `THcAn$=3sm~9Xpf`*Iuu Iy4_;jC{na?.(A!I҄@X4g!Th =grAgû,Bx@$r+;};dPH|uūN-&|wz߉DDԢ]]Wpuie-lqFCH׃ͬFch`mv(ePp0/$8;|G7hEu&XY+o/e $[Ѕyp~$?ޗ,# Q=;s6vsnؖ[f9p!'J }?:h)L2d ogf5HpŭSb(!9SMq-+mn%{n%'uY-9[uX̭Y\ߍ]hET*){,XB|l|D2 uv ًò3CW_wn:-DH>Ca0z֝$ҰW)\L:-xI(PJC5~|cD%ӛR_~WHVTNMuRʣ%竽&p!~Vٔ7ől8If;O[;͛pN_GndLzYN;ce BÀgS 71y~.h|4p 7|B ҢgtU{ Ӻwu-˴8Vuh=wW~\rzVpSL}Ϻ-=uATmu;rfݪ,nuh WJ:UH|} hݪb:U(cŵڮ[L֭ y*Swd )LGA 3Mah$ 5_fx'O–BԖɬYdL9#-E"F@(D"c% gE4h,6E 6fECAzH} D|eHDx"c$:2WbPXQPv4WAۮ3˅ܖDڃ")8"(mg pB҉i^ , Jrح{jvk9֪eUmip4ɽV_hF֡!/\E!g{Mf[U bTjeἃF{n3XFZ:4䅫hR/q޳nX5֭*1S2퐫"媗ڈnuh WѭuJ2)5>tR ;Foa1_ib$bC1OXH`>|%Bm*C[M/L}=_pvҹ1}s5{c-&{6{ᏱY J2f[Wy %o_^a6Q4Hmӡ) DJIFBhY Bbl/J yjd:*,ߊ=MNIDbHID%'PK!4/A/Ef(gk#&xƉ+9܈)zѳ^ V[:dQ-L_ov/ag8OOj@J7Y*Qe4 VDk6& A1~w:$#|lE.O Ř: ZY}q|\TMxd)$)zj-Y?K:G#euTc"euޏb8xJ `uy>$Bmu1S AuN0.ȍ{QXFF0S"^HMˎs(u%? ׅ֨_6$M{~Mx9';N;hdj3hs^XsVLK&}ƕK:Ν0)A|%,ԨP;0V?^k1 ]Wxxo]E$UnEXX$pN[x+TKD}& h2#@O͌Gfӭ`[cz+ \rU =E$EyȬJL{9()C+cvYpmIGUj PDz< I7)!MRGA GSz3QM6 L 1ϕHQ!<\|9&yNh Ѹye`6:)z| GUeEd_l6Rp%R"!T- b匚,e$% '#c% qlN$ O:)R, iWEJT1!JeNc^=wi~? };q̖rX.~z7)i))t99?!KPo>CCnnODY7 S.?i=EL>.d6)@L_^۝pݖm.,'NT/dկ>ccEHOk:b`ɛ$/V^X26װ<RcG,XVlMuȕpj>[ ju ة$t^hMi=uzǻYBe1wx^V{xn]XnA6UjĹݔDˊP`L'>휻k':ne@Hօ&eS[cbLLj iM-%V_5ž*ˮ |8a2Sb40)M2TYȘNHA \ !}Obmņ<=!h*%c ,ȌcKNaKc[ڃy-`l:+uԊt0-> 3&g'KҪS>tmD2jzKKu-QwӶ- ?{{;+r ѹWUYZj#ͽ* Qunܝ^'g^8̮.naZ#4 njCkg<1>'[N`0.,7 tKծwAw;Zԟ10آ>% ۢ MtӦٔ cNLm8Qm81W%"ؘ+j'SE(CU|^RVˮeNkh5z|#WU_z5wx @ ͥY>ۛ3oW1k~U8hNG#J}v+mE?OC5u] 7|=jH2Zr6x.ZpLke,˫4Lh9XuDd-KȜP>k  hECRƼzW ,~_~*ba3{j.y) qı) Y: \Z34@yJdHa] KoׇBf7O6Пv2qV!:S\o ^1386=%!x]mBjtqK>N8q)U{uuϣ~w}}7t,Rof7yӆo]%.x*x 9/{m!;8OGO;׏7g0-}scp'}$F|E I-21'Hm b+4Ԫ$UIb-r+PXY*L)T:TSETDܽ6mY=/akʁw`5 JS}MVzV; u㬾y]91ThsҋR\97unj!VzVCtx8/=D&Ղ#J/JYPe\j ҒjXu˲R񚠒ܸ1Ts+jX)hr_tG;Wdh:{r^0Q>nVnFz5H eYCkk>'7 #tUwzWU>c6zïbJ3JX*i&JLAs͇חpQzT3"hy9QB颖+|WsϋUۉM0N9+`AK,Ooup ZY+dz@ Css5OԢ2M8*u3^?dLԵ3 ZKW&=zI fn՞cC~#M:cH=jmus W{|Ev.smzI`a,T]HgO)I(QB(\ȗJ[#3l(rd"?ÑEL׉C}s/Qo ǐ4eDkmsTj|!U#֙v'~U@w֩Ff2:Qe9,RO[:\&KH1sͽ`O 5]:zt_֝|},2:" .9؅m)?DR~ݹ3u dl&xLqd$ce],hqABFV<Y2e @aﶩ6vs:`ePJaۦvi}KRa*ZsZ)8+-R\!D/JK)S!N,,~1<~-&?r)vhq3 eUQyЬ:ҳ#XT4sYҞA903E, cQl ߽2J72@0낹pSp%2S"y֯Yn{>JOiIV9k,d_;Q*W9zޡ^E:?^B*?>Z8^WM:@kZ+Jv^v^Цk \ׅcRw:ԵkW/nzpmd 1DnjIM !cM[ىH"⬐&v"$YN!?NAC"DZ"*Dha}mfw,$X"@S:s$#`E"tRd_9+*A+ɢYnbw`V\{ף [t1rOэA+[ѹkcs,EꊺVZf2G)֒գ\GRIFQh?zxqb|LAJ`? 
11197ms (12:26:49.585) Jan 27 12:26:49 crc kubenswrapper[4900]: Trace[979788949]: [11.197750534s] [11.197750534s] END Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.585255 4900 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.588377 4900 trace.go:236] Trace[75376506]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (27-Jan-2026 12:26:35.599) (total time: 13989ms): Jan 27 12:26:49 crc kubenswrapper[4900]: Trace[75376506]: ---"Objects listed" error: 13988ms (12:26:49.588) Jan 27 12:26:49 crc kubenswrapper[4900]: Trace[75376506]: [13.989003003s] [13.989003003s] END Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.588417 4900 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.590883 4900 trace.go:236] Trace[379049336]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (27-Jan-2026 12:26:37.394) (total time: 12196ms): Jan 27 12:26:49 crc kubenswrapper[4900]: Trace[379049336]: ---"Objects listed" error: 12196ms (12:26:49.590) Jan 27 12:26:49 crc kubenswrapper[4900]: Trace[379049336]: [12.196724326s] [12.196724326s] END Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.590913 4900 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.592101 4900 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.598782 4900 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.611397 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 06:25:01.153450862 +0000 UTC Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.611409 4900 apiserver.go:52] "Watching apiserver" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.619408 4900 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.619893 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c"] Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.620487 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.620504 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.620581 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.620676 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.620787 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.620783 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.620925 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.621371 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.621618 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.631857 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.632544 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.632587 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.637574 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.638085 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.638130 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.638584 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.638819 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.641180 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.671142 4900 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38098->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.671224 4900 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:48502->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.671236 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38098->192.168.126.11:17697: read: connection reset by peer" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.671345 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:48502->192.168.126.11:17697: read: connection reset by peer" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.671922 4900 patch_prober.go:28] interesting pod/kube-apiserver-crc 
container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.671959 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.703120 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.716530 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.717179 4900 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.729329 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.743126 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.757529 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.770046 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.782654 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.794143 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.794489 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.794542 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.794575 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.794617 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.794650 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.794667 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 27 12:26:49 
crc kubenswrapper[4900]: I0127 12:26:49.794686 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.794926 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795023 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795433 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795561 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795606 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795652 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795673 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795708 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795755 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795799 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795824 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795844 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795880 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795976 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.795868 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796045 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796093 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796165 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796046 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796136 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796404 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796442 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796546 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796569 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796618 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796401 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796714 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796750 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796774 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796798 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796853 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796885 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796911 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796935 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.796961 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797026 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797173 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797217 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797239 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797299 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797324 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797413 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797455 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797485 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797508 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797527 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797551 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797574 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797603 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797630 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797656 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797679 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797699 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797721 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797757 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797779 4900 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797804 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797827 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797859 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797890 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797916 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797937 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797956 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797975 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797992 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 27 12:26:49 crc 
kubenswrapper[4900]: I0127 12:26:49.798013 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798033 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798077 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798096 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798116 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798142 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797153 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797227 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797269 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797334 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797680 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797695 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797707 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.797884 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798068 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798128 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798158 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798364 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798410 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798962 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.798993 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.799033 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.799073 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.801127 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.801168 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.801373 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.801785 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.801987 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.802267 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.802504 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.802628 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.802626 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.802644 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.799423 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.799453 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.802910 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.803171 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.803291 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.803357 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.803868 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.804015 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.804033 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.804122 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.804246 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.805383 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.805423 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.805398 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.805610 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.805802 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806324 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806386 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806632 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806681 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806712 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806736 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806756 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806762 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806838 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806860 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806902 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.806988 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807009 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807048 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807053 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807126 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807185 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807212 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807238 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807262 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807278 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807317 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807620 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807674 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807831 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807969 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.807722 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.808418 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.808534 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.808657 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.808758 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.808925 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809086 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809239 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809394 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809505 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod 
\"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809645 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809756 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809843 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809880 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809969 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.810093 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.810462 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.810604 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.810656 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.810667 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.810735 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.810902 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.810954 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.811869 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.809861 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.812200 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.812315 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.812491 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.812587 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.812692 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.812794 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.812893 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.812997 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.813107 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod 
\"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.813499 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.813910 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.814024 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.814150 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.814224 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.814317 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.814391 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.814918 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.814950 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815026 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815032 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815149 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815250 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815219 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815523 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815609 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.814392 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815896 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.815969 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816022 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816044 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816202 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816277 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816350 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816422 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816496 4900 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816580 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816670 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816753 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816829 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816898 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816967 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817038 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817137 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817223 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 27 12:26:49 crc kubenswrapper[4900]: 
I0127 12:26:49.817297 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817370 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817446 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817516 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816311 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816611 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.816813 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817072 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817525 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.817710 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818091 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818190 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818404 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818569 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818619 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818649 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818692 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818737 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818748 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818833 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818864 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818887 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818906 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818925 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818945 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818943 4900 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818965 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.818985 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819004 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819022 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819019 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819043 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819216 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819211 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819256 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819304 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819317 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819321 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819346 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819380 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819407 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.819485 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:26:50.319426522 +0000 UTC m=+37.556454732 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819541 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819563 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819582 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819600 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819623 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819649 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819668 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819697 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819718 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: 
\"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819738 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819758 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819774 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819795 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819815 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819838 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819865 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819889 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819915 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819940 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819996 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820047 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820080 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820096 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820118 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820132 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820149 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820165 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820187 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820205 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820226 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820242 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820259 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820276 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820293 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820312 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820329 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820346 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820362 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 
12:26:49.820415 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820436 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820457 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820483 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820504 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820523 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820542 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820595 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820612 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820632 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820651 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820713 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820742 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820767 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820841 4900 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820855 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820866 4900 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820877 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820888 4900 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820898 4900 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820907 4900 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820915 4900 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820925 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820934 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820944 4900 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820953 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820963 4900 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820974 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820982 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820991 4900 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821001 4900 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821009 4900 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821018 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821028 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821038 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821047 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821070 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821078 4900 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821090 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821099 4900 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821109 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821117 4900 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821126 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821135 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821144 4900 reconciler_common.go:293] "Volume detached 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821154 4900 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821165 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821175 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821185 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821195 4900 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821203 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821213 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821222 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821231 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821241 4900 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821253 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821262 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821271 4900 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821280 4900 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821289 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821299 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821308 4900 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821317 4900 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821326 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821335 4900 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821344 4900 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821354 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821363 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821374 4900 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821385 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821395 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: 
\"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821404 4900 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821412 4900 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821423 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821433 4900 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821442 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821451 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821463 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821476 4900 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821488 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821502 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821513 4900 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821522 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821532 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821541 4900 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822337 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.819664 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820001 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820252 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820302 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820621 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820648 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820678 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820796 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.820916 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821227 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821465 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821620 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.821690 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822021 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822042 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822102 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822145 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822484 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822479 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822515 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.822887 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.823508 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.823523 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.823572 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.824235 4900 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.824286 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.824615 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:50.324586834 +0000 UTC m=+37.561615244 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.824733 4900 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.824911 4900 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.825289 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.825779 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.827508 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.827553 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.827678 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.827873 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.827884 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.827977 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:50.327960224 +0000 UTC m=+37.564988584 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828293 4900 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828331 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828363 4900 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828386 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828406 4900 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828423 4900 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828437 4900 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828453 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828474 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828492 4900 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828509 4900 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828524 4900 reconciler_common.go:293] "Volume detached for volume 
\"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828570 4900 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828591 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828608 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828762 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828787 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828802 4900 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828817 4900 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828833 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828848 4900 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828867 4900 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828882 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828897 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828911 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828939 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828955 4900 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828970 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.828843 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.830256 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.830316 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.830573 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.830973 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.831537 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.832222 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.832398 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.832754 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.833196 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.833241 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.833327 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.833341 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.833556 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.833600 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.835184 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.835518 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.836480 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.836585 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.836644 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). 
InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.836726 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.842205 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.835795 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.837497 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.846419 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.846809 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.848229 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.848395 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.848571 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.849443 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.849480 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.849543 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.849696 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.849755 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.849784 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.850197 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.850348 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.850480 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.850681 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.850774 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.851508 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.852001 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.852399 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). 
InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.852440 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.852794 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.853717 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.853781 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.854195 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.854427 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.854662 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.855507 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.855677 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.856197 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.856430 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.857045 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.857423 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.857461 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.857492 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). 
InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.857575 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.859667 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.859819 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.860101 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.860874 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.925322 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.929926 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930000 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930071 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930089 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930099 4900 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930110 4900 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930117 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930137 4900 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930209 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930222 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath 
\"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930224 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930234 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930297 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930312 4900 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930355 4900 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930375 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930385 4900 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930395 4900 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930405 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930440 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930457 4900 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930467 4900 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930478 4900 reconciler_common.go:293] "Volume detached for volume 
\"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930487 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930520 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930535 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930546 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930557 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930572 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930605 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930614 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930628 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930637 4900 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930645 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930653 4900 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930662 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930671 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930679 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930688 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930696 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930706 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930714 4900 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930725 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930733 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930742 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930749 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930760 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930769 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930777 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930786 4900 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930796 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930805 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930815 4900 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930824 4900 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930833 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930841 4900 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930851 4900 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930859 4900 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930868 4900 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930877 4900 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930886 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930915 4900 
reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930925 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930934 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930943 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930952 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930960 4900 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930969 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.930998 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931007 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931015 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931024 4900 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931032 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931042 4900 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931082 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931098 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931114 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931126 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931166 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931181 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931194 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931205 4900 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931216 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931262 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931275 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931286 4900 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931297 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931336 4900 reconciler_common.go:293] "Volume detached for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931352 4900 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931424 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931443 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931456 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931465 4900 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931473 4900 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.931509 4900 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.965620 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.968033 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.983562 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.991958 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.992638 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.994494 4900 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b" exitCode=255 Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.994550 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b"} Jan 27 12:26:49 crc kubenswrapper[4900]: I0127 12:26:49.994608 4900 scope.go:117] "RemoveContainer" containerID="ffd73696628b81b83b2f5c6d9ece43e7075a7bb521543b204ca627c00b370948" Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.999297 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.999369 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.999386 4900 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:49 crc kubenswrapper[4900]: E0127 12:26:49.999470 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:50.499446801 +0000 UTC m=+37.736475011 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.003654 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.005507 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.006878 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.006906 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.006918 4900 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.006969 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:50.506952222 +0000 UTC m=+37.743980432 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.007738 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.013218 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.027995 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.032426 4900 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.032459 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.040478 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.056297 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.069414 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.081824 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.099803 4900 scope.go:117] "RemoveContainer" containerID="1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b" Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.099993 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.181252 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-22mkn"] Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.181755 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-22mkn" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.184914 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.184968 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.184971 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.185446 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.233764 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d3d3b849-9dcd-43d2-a8e2-26a3f37ef978-hosts-file\") pod \"node-resolver-22mkn\" (UID: \"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\") " pod="openshift-dns/node-resolver-22mkn" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.233809 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqc4r\" (UniqueName: \"kubernetes.io/projected/d3d3b849-9dcd-43d2-a8e2-26a3f37ef978-kube-api-access-mqc4r\") pod \"node-resolver-22mkn\" (UID: \"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\") " pod="openshift-dns/node-resolver-22mkn" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.237808 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 27 12:26:50 crc kubenswrapper[4900]: W0127 12:26:50.246717 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-2d48a13f01c133262762a9cbc014764a017aae3731e03ee6049d6658aec71724 WatchSource:0}: Error finding container 2d48a13f01c133262762a9cbc014764a017aae3731e03ee6049d6658aec71724: Status 404 returned error can't find the container with id 2d48a13f01c133262762a9cbc014764a017aae3731e03ee6049d6658aec71724 Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.248865 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.249033 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.250470 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.255702 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.259540 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:50 crc kubenswrapper[4900]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash Jan 27 12:26:50 crc kubenswrapper[4900]: set -o allexport Jan 27 12:26:50 crc kubenswrapper[4900]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then Jan 27 12:26:50 crc kubenswrapper[4900]: source /etc/kubernetes/apiserver-url.env Jan 27 12:26:50 crc kubenswrapper[4900]: else Jan 27 12:26:50 crc kubenswrapper[4900]: echo "Error: /etc/kubernetes/apiserver-url.env is missing" Jan 27 12:26:50 crc kubenswrapper[4900]: exit 1 Jan 27 12:26:50 crc kubenswrapper[4900]: fi Jan 27 12:26:50 crc kubenswrapper[4900]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104 Jan 27 12:26:50 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_I
MAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:50 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.260679 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312" Jan 27 12:26:50 crc kubenswrapper[4900]: W0127 12:26:50.263184 4900 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-86159af9b21a4bd26f6a37bec17b4fa648c2bf38e3a4b80fb8816016b29b8113 WatchSource:0}: Error finding container 86159af9b21a4bd26f6a37bec17b4fa648c2bf38e3a4b80fb8816016b29b8113: Status 404 returned error can't find the container with id 86159af9b21a4bd26f6a37bec17b4fa648c2bf38e3a4b80fb8816016b29b8113 Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.264750 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:50 crc kubenswrapper[4900]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Jan 27 12:26:50 crc kubenswrapper[4900]: if [[ -f "/env/_master" ]]; then Jan 27 12:26:50 crc kubenswrapper[4900]: set -o allexport Jan 27 12:26:50 crc kubenswrapper[4900]: source "/env/_master" Jan 27 12:26:50 crc kubenswrapper[4900]: set +o allexport Jan 27 12:26:50 crc kubenswrapper[4900]: fi Jan 27 12:26:50 crc kubenswrapper[4900]: # OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled. Jan 27 12:26:50 crc kubenswrapper[4900]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791 Jan 27 12:26:50 crc kubenswrapper[4900]: ho_enable="--enable-hybrid-overlay" Jan 27 12:26:50 crc kubenswrapper[4900]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook" Jan 27 12:26:50 crc kubenswrapper[4900]: # extra-allowed-user: service account `ovn-kubernetes-control-plane` Jan 27 12:26:50 crc kubenswrapper[4900]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager) Jan 27 12:26:50 crc kubenswrapper[4900]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Jan 27 12:26:50 crc kubenswrapper[4900]: --webhook-cert-dir="/etc/webhook-cert" \ Jan 27 12:26:50 crc kubenswrapper[4900]: --webhook-host=127.0.0.1 \ Jan 27 12:26:50 crc kubenswrapper[4900]: --webhook-port=9743 \ Jan 27 12:26:50 crc kubenswrapper[4900]: ${ho_enable} \ Jan 27 12:26:50 crc kubenswrapper[4900]: --enable-interconnect \ Jan 27 12:26:50 crc kubenswrapper[4900]: --disable-approver \ Jan 27 12:26:50 crc kubenswrapper[4900]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \ Jan 27 12:26:50 crc kubenswrapper[4900]: --wait-for-kubernetes-api=200s \ Jan 27 12:26:50 crc kubenswrapper[4900]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \ Jan 27 12:26:50 crc kubenswrapper[4900]: --loglevel="${LOGLEVEL}" Jan 27 12:26:50 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:50 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.267564 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:50 crc kubenswrapper[4900]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Jan 27 12:26:50 crc kubenswrapper[4900]: if [[ -f "/env/_master" ]]; then Jan 27 12:26:50 crc kubenswrapper[4900]: set -o allexport Jan 27 12:26:50 crc kubenswrapper[4900]: source "/env/_master" Jan 27 12:26:50 crc kubenswrapper[4900]: set +o allexport Jan 27 12:26:50 crc kubenswrapper[4900]: fi Jan 27 12:26:50 crc kubenswrapper[4900]: Jan 27 12:26:50 crc kubenswrapper[4900]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Jan 27 12:26:50 crc kubenswrapper[4900]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Jan 27 12:26:50 crc kubenswrapper[4900]: --disable-webhook \ Jan 27 12:26:50 crc kubenswrapper[4900]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Jan 27 12:26:50 crc kubenswrapper[4900]: --loglevel="${LOGLEVEL}" Jan 27 12:26:50 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:50 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.268720 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.274173 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.335244 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.335374 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqc4r\" (UniqueName: \"kubernetes.io/projected/d3d3b849-9dcd-43d2-a8e2-26a3f37ef978-kube-api-access-mqc4r\") pod \"node-resolver-22mkn\" (UID: \"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\") " pod="openshift-dns/node-resolver-22mkn"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.335428 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.335519 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d3d3b849-9dcd-43d2-a8e2-26a3f37ef978-hosts-file\") pod \"node-resolver-22mkn\" (UID: \"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\") " pod="openshift-dns/node-resolver-22mkn"
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.335597 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:26:51.335557784 +0000 UTC m=+38.572585994 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.335659 4900 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.335730 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:51.335711498 +0000 UTC m=+38.572739708 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.335730 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d3d3b849-9dcd-43d2-a8e2-26a3f37ef978-hosts-file\") pod \"node-resolver-22mkn\" (UID: \"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\") " pod="openshift-dns/node-resolver-22mkn"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.335786 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.335971 4900 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.336145 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:51.33611131 +0000 UTC m=+38.573139520 (durationBeforeRetry 1s).
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.485886 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.486714 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.488215 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.489147 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.490692 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.491303 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.491954 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.493332 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.494470 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.496004 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.496723 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.498127 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.499003 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.499810 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.501147 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.501985 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.503668 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.504362 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.505405 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.507011 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.513997 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.515227 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.515755 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.517170 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.517694 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.518929 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.519849 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.521313 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.521997 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.523103 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.523705 4900 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.523817 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.526772 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.527617 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.528474 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.530968 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.532226 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.533038 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.534413 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.535424 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.536572 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.537788 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.538433 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.538737 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.538795 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.538940 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.539148 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.539189 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.539207 4900 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.539272 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:51.53925205 +0000 UTC m=+38.776280260 (durationBeforeRetry 1s).
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.539282 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.540132 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.541427 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.542271 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.556762 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.556933 4900 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.557179 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:51.557137698 +0000 UTC m=+38.794165908 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.558753 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.560659 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.561497 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.562343 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.564151 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.565200 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.566801 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes"
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.612509 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 22:43:45.631871393 +0000 UTC
Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.670498 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status:
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.697916 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqc4r\" (UniqueName: \"kubernetes.io/projected/d3d3b849-9dcd-43d2-a8e2-26a3f37ef978-kube-api-access-mqc4r\") pod \"node-resolver-22mkn\" (UID: \"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\") " pod="openshift-dns/node-resolver-22mkn" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.797750 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-22mkn"
Jan 27 12:26:50 crc kubenswrapper[4900]: W0127 12:26:50.856666 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3d3b849_9dcd_43d2_a8e2_26a3f37ef978.slice/crio-5d595c8e36d00c329b4a4ecd24c6515c264a84113d2f41958f36d7bad49a1fab WatchSource:0}: Error finding container 5d595c8e36d00c329b4a4ecd24c6515c264a84113d2f41958f36d7bad49a1fab: Status 404 returned error can't find the container with id 5d595c8e36d00c329b4a4ecd24c6515c264a84113d2f41958f36d7bad49a1fab
Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.858908 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 27 12:26:50 crc kubenswrapper[4900]: container &Container{Name:dns-node-resolver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/bin/bash -c #!/bin/bash
Jan 27 12:26:50 crc kubenswrapper[4900]: set -uo pipefail
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]: trap 'jobs -p | xargs kill || true; wait; exit 0' TERM
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]: OPENSHIFT_MARKER="openshift-generated-node-resolver"
Jan 27 12:26:50 crc kubenswrapper[4900]: HOSTS_FILE="/etc/hosts"
Jan 27 12:26:50 crc kubenswrapper[4900]: TEMP_FILE="/etc/hosts.tmp"
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]: IFS=', ' read -r -a services <<< "${SERVICES}"
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]: # Make a temporary file with the old hosts file's attributes.
Jan 27 12:26:50 crc kubenswrapper[4900]: if ! cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}"; then
Jan 27 12:26:50 crc kubenswrapper[4900]: echo "Failed to preserve hosts file. Exiting."
Jan 27 12:26:50 crc kubenswrapper[4900]: exit 1
Jan 27 12:26:50 crc kubenswrapper[4900]: fi
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]: while true; do
Jan 27 12:26:50 crc kubenswrapper[4900]: declare -A svc_ips
Jan 27 12:26:50 crc kubenswrapper[4900]: for svc in "${services[@]}"; do
Jan 27 12:26:50 crc kubenswrapper[4900]: # Fetch service IP from cluster dns if present. We make several tries
Jan 27 12:26:50 crc kubenswrapper[4900]: # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones
Jan 27 12:26:50 crc kubenswrapper[4900]: # are for deployments with Kuryr on older OpenStack (OSP13) - those do not
Jan 27 12:26:50 crc kubenswrapper[4900]: # support UDP loadbalancers and require reaching DNS through TCP.
Jan 27 12:26:50 crc kubenswrapper[4900]: cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Jan 27 12:26:50 crc kubenswrapper[4900]: 'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Jan 27 12:26:50 crc kubenswrapper[4900]: 'dig -t A +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Jan 27 12:26:50 crc kubenswrapper[4900]: 'dig -t AAAA +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"')
Jan 27 12:26:50 crc kubenswrapper[4900]: for i in ${!cmds[*]}
Jan 27 12:26:50 crc kubenswrapper[4900]: do
Jan 27 12:26:50 crc kubenswrapper[4900]: ips=($(eval "${cmds[i]}"))
Jan 27 12:26:50 crc kubenswrapper[4900]: if [[ "$?" -eq 0 && "${#ips[@]}" -ne 0 ]]; then
Jan 27 12:26:50 crc kubenswrapper[4900]: svc_ips["${svc}"]="${ips[@]}"
Jan 27 12:26:50 crc kubenswrapper[4900]: break
Jan 27 12:26:50 crc kubenswrapper[4900]: fi
Jan 27 12:26:50 crc kubenswrapper[4900]: done
Jan 27 12:26:50 crc kubenswrapper[4900]: done
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]: # Update /etc/hosts only if we get valid service IPs
Jan 27 12:26:50 crc kubenswrapper[4900]: # We will not update /etc/hosts when there is coredns service outage or api unavailability
Jan 27 12:26:50 crc kubenswrapper[4900]: # Stale entries could exist in /etc/hosts if the service is deleted
Jan 27 12:26:50 crc kubenswrapper[4900]: if [[ -n "${svc_ips[*]-}" ]]; then
Jan 27 12:26:50 crc kubenswrapper[4900]: # Build a new hosts file from /etc/hosts with our custom entries filtered out
Jan 27 12:26:50 crc kubenswrapper[4900]: if ! sed --silent "/# ${OPENSHIFT_MARKER}/d; w ${TEMP_FILE}" "${HOSTS_FILE}"; then
Jan 27 12:26:50 crc kubenswrapper[4900]: # Only continue rebuilding the hosts entries if its original content is preserved
Jan 27 12:26:50 crc kubenswrapper[4900]: sleep 60 & wait
Jan 27 12:26:50 crc kubenswrapper[4900]: continue
Jan 27 12:26:50 crc kubenswrapper[4900]: fi
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]: # Append resolver entries for services
Jan 27 12:26:50 crc kubenswrapper[4900]: rc=0
Jan 27 12:26:50 crc kubenswrapper[4900]: for svc in "${!svc_ips[@]}"; do
Jan 27 12:26:50 crc kubenswrapper[4900]: for ip in ${svc_ips[${svc}]}; do
Jan 27 12:26:50 crc kubenswrapper[4900]: echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" || rc=$?
Jan 27 12:26:50 crc kubenswrapper[4900]: done
Jan 27 12:26:50 crc kubenswrapper[4900]: done
Jan 27 12:26:50 crc kubenswrapper[4900]: if [[ $rc -ne 0 ]]; then
Jan 27 12:26:50 crc kubenswrapper[4900]: sleep 60 & wait
Jan 27 12:26:50 crc kubenswrapper[4900]: continue
Jan 27 12:26:50 crc kubenswrapper[4900]: fi
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]:
Jan 27 12:26:50 crc kubenswrapper[4900]: # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior
Jan 27 12:26:50 crc kubenswrapper[4900]: # Replace /etc/hosts with our modified version if needed
Jan 27 12:26:50 crc kubenswrapper[4900]: cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}"
Jan 27 12:26:50 crc kubenswrapper[4900]: # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn
Jan 27 12:26:50 crc kubenswrapper[4900]: fi
Jan 27 12:26:50 crc kubenswrapper[4900]: sleep 60 & wait
Jan 27 12:26:50 crc kubenswrapper[4900]: unset svc_ips
Jan 27 12:26:50 crc kubenswrapper[4900]: done
Jan 27 12:26:50 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:SERVICES,Value:image-registry.openshift-image-registry.svc,ValueFrom:nil,},EnvVar{Name:NAMESERVER,Value:10.217.4.10,ValueFrom:nil,},EnvVar{Name:CLUSTER_DOMAIN,Value:cluster.local,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{22020096 0} {} 21Mi
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:hosts-file,ReadOnly:false,MountPath:/etc/hosts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mqc4r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-resolver-22mkn_openshift-dns(d3d3b849-9dcd-43d2-a8e2-26a3f37ef978): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:50 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:50 crc kubenswrapper[4900]: E0127 12:26:50.860090 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dns-node-resolver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-dns/node-resolver-22mkn" podUID="d3d3b849-9dcd-43d2-a8e2-26a3f37ef978" Jan 27 12:26:50 crc kubenswrapper[4900]: I0127 12:26:50.913075 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.194815 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c99b4384fd5da33a3514ccd6d51f1a66dbd58ca0c9c8c89f30aa42b74f00ea86"}
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.196344 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 27 12:26:51 crc kubenswrapper[4900]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash
Jan 27 12:26:51 crc kubenswrapper[4900]: set -o allexport
Jan 27 12:26:51 crc kubenswrapper[4900]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then
Jan 27 12:26:51 crc kubenswrapper[4900]: source /etc/kubernetes/apiserver-url.env
Jan 27 12:26:51 crc kubenswrapper[4900]: else
Jan 27 12:26:51 crc kubenswrapper[4900]: echo "Error: /etc/kubernetes/apiserver-url.env is missing"
Jan 27 12:26:51 crc kubenswrapper[4900]: exit 1
Jan 27 12:26:51 crc kubenswrapper[4900]: fi
Jan 27 12:26:51 crc kubenswrapper[4900]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104
Jan 27 12:26:51 crc kubenswrapper[4900]:
],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:51 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.197086 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-22mkn" event={"ID":"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978","Type":"ContainerStarted","Data":"5d595c8e36d00c329b4a4ecd24c6515c264a84113d2f41958f36d7bad49a1fab"} Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.197463 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.198119 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"86159af9b21a4bd26f6a37bec17b4fa648c2bf38e3a4b80fb8816016b29b8113"} Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.199777 4900 scope.go:117] "RemoveContainer" containerID="1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b" Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.199889 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 27 12:26:51 crc kubenswrapper[4900]: 
I0127 12:26:51.199921 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"2d48a13f01c133262762a9cbc014764a017aae3731e03ee6049d6658aec71724"}
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.200341 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 27 12:26:51 crc kubenswrapper[4900]: container &Container{Name:dns-node-resolver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/bin/bash -c #!/bin/bash
Jan 27 12:26:51 crc kubenswrapper[4900]: set -uo pipefail
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]: trap 'jobs -p | xargs kill || true; wait; exit 0' TERM
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]: OPENSHIFT_MARKER="openshift-generated-node-resolver"
Jan 27 12:26:51 crc kubenswrapper[4900]: HOSTS_FILE="/etc/hosts"
Jan 27 12:26:51 crc kubenswrapper[4900]: TEMP_FILE="/etc/hosts.tmp"
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]: IFS=', ' read -r -a services <<< "${SERVICES}"
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]: # Make a temporary file with the old hosts file's attributes.
Jan 27 12:26:51 crc kubenswrapper[4900]: if ! cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}"; then
Jan 27 12:26:51 crc kubenswrapper[4900]: echo "Failed to preserve hosts file. Exiting."
Jan 27 12:26:51 crc kubenswrapper[4900]: exit 1
Jan 27 12:26:51 crc kubenswrapper[4900]: fi
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]: while true; do
Jan 27 12:26:51 crc kubenswrapper[4900]: declare -A svc_ips
Jan 27 12:26:51 crc kubenswrapper[4900]: for svc in "${services[@]}"; do
Jan 27 12:26:51 crc kubenswrapper[4900]: # Fetch service IP from cluster dns if present. We make several tries
Jan 27 12:26:51 crc kubenswrapper[4900]: # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones
Jan 27 12:26:51 crc kubenswrapper[4900]: # are for deployments with Kuryr on older OpenStack (OSP13) - those do not
Jan 27 12:26:51 crc kubenswrapper[4900]: # support UDP loadbalancers and require reaching DNS through TCP.
Jan 27 12:26:51 crc kubenswrapper[4900]: cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Jan 27 12:26:51 crc kubenswrapper[4900]: 'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Jan 27 12:26:51 crc kubenswrapper[4900]: 'dig -t A +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Jan 27 12:26:51 crc kubenswrapper[4900]: 'dig -t AAAA +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"')
Jan 27 12:26:51 crc kubenswrapper[4900]: for i in ${!cmds[*]}
Jan 27 12:26:51 crc kubenswrapper[4900]: do
Jan 27 12:26:51 crc kubenswrapper[4900]: ips=($(eval "${cmds[i]}"))
Jan 27 12:26:51 crc kubenswrapper[4900]: if [[ "$?" -eq 0 && "${#ips[@]}" -ne 0 ]]; then
Jan 27 12:26:51 crc kubenswrapper[4900]: svc_ips["${svc}"]="${ips[@]}"
Jan 27 12:26:51 crc kubenswrapper[4900]: break
Jan 27 12:26:51 crc kubenswrapper[4900]: fi
Jan 27 12:26:51 crc kubenswrapper[4900]: done
Jan 27 12:26:51 crc kubenswrapper[4900]: done
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]: # Update /etc/hosts only if we get valid service IPs
Jan 27 12:26:51 crc kubenswrapper[4900]: # We will not update /etc/hosts when there is coredns service outage or api unavailability
Jan 27 12:26:51 crc kubenswrapper[4900]: # Stale entries could exist in /etc/hosts if the service is deleted
Jan 27 12:26:51 crc kubenswrapper[4900]: if [[ -n "${svc_ips[*]-}" ]]; then
Jan 27 12:26:51 crc kubenswrapper[4900]: # Build a new hosts file from /etc/hosts with our custom entries filtered out
Jan 27 12:26:51 crc kubenswrapper[4900]: if ! sed --silent "/# ${OPENSHIFT_MARKER}/d; w ${TEMP_FILE}" "${HOSTS_FILE}"; then
Jan 27 12:26:51 crc kubenswrapper[4900]: # Only continue rebuilding the hosts entries if its original content is preserved
Jan 27 12:26:51 crc kubenswrapper[4900]: sleep 60 & wait
Jan 27 12:26:51 crc kubenswrapper[4900]: continue
Jan 27 12:26:51 crc kubenswrapper[4900]: fi
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]: # Append resolver entries for services
Jan 27 12:26:51 crc kubenswrapper[4900]: rc=0
Jan 27 12:26:51 crc kubenswrapper[4900]: for svc in "${!svc_ips[@]}"; do
Jan 27 12:26:51 crc kubenswrapper[4900]: for ip in ${svc_ips[${svc}]}; do
Jan 27 12:26:51 crc kubenswrapper[4900]: echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" || rc=$?
Jan 27 12:26:51 crc kubenswrapper[4900]: done
Jan 27 12:26:51 crc kubenswrapper[4900]: done
Jan 27 12:26:51 crc kubenswrapper[4900]: if [[ $rc -ne 0 ]]; then
Jan 27 12:26:51 crc kubenswrapper[4900]: sleep 60 & wait
Jan 27 12:26:51 crc kubenswrapper[4900]: continue
Jan 27 12:26:51 crc kubenswrapper[4900]: fi
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]:
Jan 27 12:26:51 crc kubenswrapper[4900]: # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior
Jan 27 12:26:51 crc kubenswrapper[4900]: # Replace /etc/hosts with our modified version if needed
Jan 27 12:26:51 crc kubenswrapper[4900]: cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}"
Jan 27 12:26:51 crc kubenswrapper[4900]: # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn
Jan 27 12:26:51 crc kubenswrapper[4900]: fi
Jan 27 12:26:51 crc kubenswrapper[4900]: sleep 60 & wait
Jan 27 12:26:51 crc kubenswrapper[4900]: unset svc_ips
Jan 27 12:26:51 crc kubenswrapper[4900]: done
Jan 27 12:26:51 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:SERVICES,Value:image-registry.openshift-image-registry.svc,ValueFrom:nil,},EnvVar{Name:NAMESERVER,Value:10.217.4.10,ValueFrom:nil,},EnvVar{Name:CLUSTER_DOMAIN,Value:cluster.local,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{22020096 0} {} 21Mi
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:hosts-file,ReadOnly:false,MountPath:/etc/hosts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mqc4r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-resolver-22mkn_openshift-dns(d3d3b849-9dcd-43d2-a8e2-26a3f37ef978): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Jan 27 12:26:51 crc kubenswrapper[4900]: > logger="UnhandledError"
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.200500 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 27 12:26:51 crc kubenswrapper[4900]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe
Jan 27 12:26:51 crc kubenswrapper[4900]: if [[ -f "/env/_master" ]]; then
Jan 27 12:26:51 crc kubenswrapper[4900]:   set -o allexport
Jan 27 12:26:51 crc kubenswrapper[4900]:   source "/env/_master"
Jan 27 12:26:51 crc kubenswrapper[4900]:   set +o allexport
Jan 27 12:26:51 crc kubenswrapper[4900]: fi
Jan 27 12:26:51 crc kubenswrapper[4900]: # OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled.
Jan 27 12:26:51 crc kubenswrapper[4900]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791
Jan 27 12:26:51 crc kubenswrapper[4900]: ho_enable="--enable-hybrid-overlay"
Jan 27 12:26:51 crc kubenswrapper[4900]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook"
Jan 27 12:26:51 crc kubenswrapper[4900]: # extra-allowed-user: service account `ovn-kubernetes-control-plane`
Jan 27 12:26:51 crc kubenswrapper[4900]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager)
Jan 27 12:26:51 crc kubenswrapper[4900]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \
Jan 27 12:26:51 crc kubenswrapper[4900]: --webhook-cert-dir="/etc/webhook-cert" \
Jan 27 12:26:51 crc kubenswrapper[4900]: --webhook-host=127.0.0.1 \
Jan 27 12:26:51 crc kubenswrapper[4900]: --webhook-port=9743 \
Jan 27 12:26:51 crc kubenswrapper[4900]: ${ho_enable} \
Jan 27 12:26:51 crc kubenswrapper[4900]: --enable-interconnect \
Jan 27 12:26:51 crc kubenswrapper[4900]: --disable-approver \
Jan 27 12:26:51 crc kubenswrapper[4900]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \
Jan 27 12:26:51 crc kubenswrapper[4900]: --wait-for-kubernetes-api=200s \
Jan 27 12:26:51 crc kubenswrapper[4900]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \
Jan 27 12:26:51 crc kubenswrapper[4900]: --loglevel="${LOGLEVEL}"
Jan 27 12:26:51 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct
envvars Jan 27 12:26:51 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.200822 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.204710 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dns-node-resolver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-dns/node-resolver-22mkn" podUID="d3d3b849-9dcd-43d2-a8e2-26a3f37ef978" Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.204798 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.204770 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.205642 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:51 crc kubenswrapper[4900]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Jan 27 12:26:51 crc kubenswrapper[4900]: if [[ -f "/env/_master" ]]; then Jan 27 12:26:51 crc kubenswrapper[4900]: set -o allexport Jan 27 12:26:51 crc kubenswrapper[4900]: source "/env/_master" Jan 27 12:26:51 crc kubenswrapper[4900]: set +o allexport Jan 27 12:26:51 crc kubenswrapper[4900]: fi Jan 27 12:26:51 crc kubenswrapper[4900]: Jan 27 12:26:51 crc kubenswrapper[4900]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Jan 27 12:26:51 crc kubenswrapper[4900]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Jan 27 12:26:51 crc kubenswrapper[4900]: --disable-webhook \ Jan 27 12:26:51 crc kubenswrapper[4900]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Jan 27 12:26:51 crc kubenswrapper[4900]: --loglevel="${LOGLEVEL}" Jan 27 12:26:51 crc kubenswrapper[4900]: 
],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:51 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.206806 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.299270 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.355473 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.355587 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.355623 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.355735 4900 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.355789 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:53.355772621 +0000 UTC m=+40.592800831 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.355843 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:26:53.355837273 +0000 UTC m=+40.592865483 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.355872 4900 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.355892 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:53.355886565 +0000 UTC m=+40.592914775 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.387543 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.407484 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.418012 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.433915 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.449557 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.462368 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.480997 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.481140 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.481194 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.481193 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.481334 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.481459 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.481509 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.493594 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.511136 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.554378 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.557293 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.557362 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.557493 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.557532 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.557548 4900 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.557493 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 
12:26:51.557624 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.557637 4900 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.557606 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:53.557584943 +0000 UTC m=+40.794613153 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 12:26:51 crc kubenswrapper[4900]: E0127 12:26:51.557688 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:53.557676676 +0000 UTC m=+40.794704896 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.613038 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 01:34:20.976640163 +0000 UTC
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.749964 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-2pp6x"]
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.751003 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.757751 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.757788 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.757810 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.758082 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.758611 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.821211 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.937304 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.972181 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.979116 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2f2c6408-cc23-4b42-92ba-ef08be13637b-mcd-auth-proxy-config\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.979165 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-664p6\" (UniqueName: \"kubernetes.io/projected/2f2c6408-cc23-4b42-92ba-ef08be13637b-kube-api-access-664p6\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.979196 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2f2c6408-cc23-4b42-92ba-ef08be13637b-proxy-tls\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.979235 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/2f2c6408-cc23-4b42-92ba-ef08be13637b-rootfs\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.993923 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 12:26:51 crc kubenswrapper[4900]: I0127 12:26:51.998778 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.006542 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.080230 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2f2c6408-cc23-4b42-92ba-ef08be13637b-proxy-tls\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.080383 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/2f2c6408-cc23-4b42-92ba-ef08be13637b-rootfs\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.080415 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2f2c6408-cc23-4b42-92ba-ef08be13637b-mcd-auth-proxy-config\") pod \"machine-config-daemon-2pp6x\" (UID: 
\"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.080443 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-664p6\" (UniqueName: \"kubernetes.io/projected/2f2c6408-cc23-4b42-92ba-ef08be13637b-kube-api-access-664p6\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.081313 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/2f2c6408-cc23-4b42-92ba-ef08be13637b-rootfs\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.082536 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2f2c6408-cc23-4b42-92ba-ef08be13637b-mcd-auth-proxy-config\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.086949 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2f2c6408-cc23-4b42-92ba-ef08be13637b-proxy-tls\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.128926 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.150285 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-664p6\" (UniqueName: \"kubernetes.io/projected/2f2c6408-cc23-4b42-92ba-ef08be13637b-kube-api-access-664p6\") pod \"machine-config-daemon-2pp6x\" (UID: \"2f2c6408-cc23-4b42-92ba-ef08be13637b\") " pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.150271 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.205371 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.206579 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.370734 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.371687 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:26:52 crc kubenswrapper[4900]: W0127 12:26:52.394879 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f2c6408_cc23_4b42_92ba_ef08be13637b.slice/crio-2df2ab19f05d1c3fad431ebab8a8e286ad480aa4e0f858d65500ca1b367492b6 WatchSource:0}: Error finding container 2df2ab19f05d1c3fad431ebab8a8e286ad480aa4e0f858d65500ca1b367492b6: Status 404 returned error can't find the container with id 2df2ab19f05d1c3fad431ebab8a8e286ad480aa4e0f858d65500ca1b367492b6 Jan 27 12:26:52 crc kubenswrapper[4900]: E0127 12:26:52.396952 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:machine-config-daemon,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a,Command:[/usr/bin/machine-config-daemon],Args:[start --payload-version=4.18.1],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:health,HostPort:8798,ContainerPort:8798,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:rootfs,ReadOnly:false,MountPath:/rootfs,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-664p6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/health,Port:{0 8798 },Host:127.0.0.1,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:120,TimeoutSeconds:1,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 27 12:26:52 crc kubenswrapper[4900]: E0127 12:26:52.399373 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,Command:[],Args:[--secure-listen-address=0.0.0.0:9001 
--config-file=/etc/kube-rbac-proxy/config-file.yaml --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 --tls-min-version=VersionTLS12 --upstream=http://127.0.0.1:8797 --logtostderr=true --tls-cert-file=/etc/tls/private/tls.crt --tls-private-key-file=/etc/tls/private/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:9001,ContainerPort:9001,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:proxy-tls,ReadOnly:false,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:mcd-auth-proxy-config,ReadOnly:false,MountPath:/etc/kube-rbac-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-664p6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 27 12:26:52 crc kubenswrapper[4900]: E0127 12:26:52.400562 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"machine-config-daemon\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.403563 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-twlmq"] Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.403909 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.405422 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-lrt6s"] Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.406186 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.406276 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.407866 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.408120 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.408198 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.408341 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.408570 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.408713 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.421989 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.439677 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.452882 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.467718 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.484962 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-socket-dir-parent\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485005 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-k8s-cni-cncf-io\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485024 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/206345a2-ea7e-4a32-8c93-414290ba5c92-cni-binary-copy\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485104 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-hostroot\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485154 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-netns\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485174 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-cni-multus\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 
12:26:52.485193 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-cnibin\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485246 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-os-release\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485269 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-conf-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485373 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hf9p\" (UniqueName: \"kubernetes.io/projected/02bfa799-f281-465d-ab6f-19ea9c16979c-kube-api-access-9hf9p\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485463 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-system-cni-dir\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485492 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-kubelet\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485535 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-system-cni-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485580 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-cnibin\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485582 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485628 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-os-release\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485683 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg9l4\" (UniqueName: \"kubernetes.io/projected/206345a2-ea7e-4a32-8c93-414290ba5c92-kube-api-access-hg9l4\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485712 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-multus-certs\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485786 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-cni-bin\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485842 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/206345a2-ea7e-4a32-8c93-414290ba5c92-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485905 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/02bfa799-f281-465d-ab6f-19ea9c16979c-cni-binary-copy\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.485972 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-daemon-config\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.486006 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-etc-kubernetes\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.486028 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-tuning-conf-dir\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.486071 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-cni-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.495676 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.507954 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.518180 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.527846 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.537581 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.545703 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.556226 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.566218 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.577271 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 
27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.586577 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-etc-kubernetes\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.586777 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-tuning-conf-dir\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.586927 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-daemon-config\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.586729 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-etc-kubernetes\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587050 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-cni-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587328 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-socket-dir-parent\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587361 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-k8s-cni-cncf-io\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587386 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/206345a2-ea7e-4a32-8c93-414290ba5c92-cni-binary-copy\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587412 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-hostroot\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587457 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-netns\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587481 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-cni-multus\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587502 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-conf-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587528 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-cnibin\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587530 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-k8s-cni-cncf-io\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587549 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-os-release\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587599 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-netns\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587624 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-tuning-conf-dir\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587631 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hf9p\" (UniqueName: \"kubernetes.io/projected/02bfa799-f281-465d-ab6f-19ea9c16979c-kube-api-access-9hf9p\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587567 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-socket-dir-parent\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " 
pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587664 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-system-cni-dir\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587737 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-daemon-config\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587760 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-os-release\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587795 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-conf-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587807 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-cni-multus\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587818 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-hostroot\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587839 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-system-cni-dir\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587873 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-cnibin\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587922 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-kubelet\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.587960 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-system-cni-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588017 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-kubelet\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588027 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-cnibin\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588109 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-os-release\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588111 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-system-cni-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588169 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-cnibin\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588199 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/206345a2-ea7e-4a32-8c93-414290ba5c92-os-release\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588249 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-multus-cni-dir\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588268 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg9l4\" (UniqueName: \"kubernetes.io/projected/206345a2-ea7e-4a32-8c93-414290ba5c92-kube-api-access-hg9l4\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588307 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-multus-certs\") pod 
\"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588308 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/206345a2-ea7e-4a32-8c93-414290ba5c92-cni-binary-copy\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588329 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-cni-bin\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588361 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/206345a2-ea7e-4a32-8c93-414290ba5c92-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588370 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-run-multus-certs\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588401 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/02bfa799-f281-465d-ab6f-19ea9c16979c-cni-binary-copy\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588412 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/02bfa799-f281-465d-ab6f-19ea9c16979c-host-var-lib-cni-bin\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588865 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/206345a2-ea7e-4a32-8c93-414290ba5c92-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.588987 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/02bfa799-f281-465d-ab6f-19ea9c16979c-cni-binary-copy\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.602833 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hf9p\" (UniqueName: \"kubernetes.io/projected/02bfa799-f281-465d-ab6f-19ea9c16979c-kube-api-access-9hf9p\") pod \"multus-twlmq\" (UID: \"02bfa799-f281-465d-ab6f-19ea9c16979c\") " pod="openshift-multus/multus-twlmq" Jan 
27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.604671 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg9l4\" (UniqueName: \"kubernetes.io/projected/206345a2-ea7e-4a32-8c93-414290ba5c92-kube-api-access-hg9l4\") pod \"multus-additional-cni-plugins-lrt6s\" (UID: \"206345a2-ea7e-4a32-8c93-414290ba5c92\") " pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.613226 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 10:26:45.381928412 +0000 UTC Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.716257 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-twlmq" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.723337 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" Jan 27 12:26:52 crc kubenswrapper[4900]: W0127 12:26:52.726468 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02bfa799_f281_465d_ab6f_19ea9c16979c.slice/crio-afb9d0d107c2cd3397ade5f06e50d365543d09c56f53be5e2a2944ea5d2af108 WatchSource:0}: Error finding container afb9d0d107c2cd3397ade5f06e50d365543d09c56f53be5e2a2944ea5d2af108: Status 404 returned error can't find the container with id afb9d0d107c2cd3397ade5f06e50d365543d09c56f53be5e2a2944ea5d2af108 Jan 27 12:26:52 crc kubenswrapper[4900]: E0127 12:26:52.732032 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:52 crc kubenswrapper[4900]: container &Container{Name:kube-multus,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,Command:[/bin/bash -ec --],Args:[MULTUS_DAEMON_OPT="" Jan 27 12:26:52 crc kubenswrapper[4900]: /entrypoint/cnibincopy.sh; exec /usr/src/multus-cni/bin/multus-daemon $MULTUS_DAEMON_OPT Jan 27 12:26:52 crc kubenswrapper[4900]: ],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/bin/,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_PORT,Value:6443,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_HOST,Value:api-int.crc.testing,ValueFrom:nil,},EnvVar{Name:MULTUS_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:K8S_NODE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:false,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:system-cni-dir,ReadOnly:false,MountPath:/host/etc/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-cni-dir,ReadOnly:false,MountPath:/host/run/multus/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-socket-dir-parent,ReadOnly:false,MountPath:/host/run/multus,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-k8s-cni-cncf-io,ReadOnly:false,MountPath:/run/k8s.cni.cncf.io,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-netns,ReadOnly:false,MountPath:/run/netns,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-bin,ReadOnly:false,MountPath:/var/lib/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-multus,ReadOnly:false,MountPath:/var/lib/cni/multus,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-kubelet,ReadOnly:false,MountPath:/var/lib/kubelet,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:hostroot,ReadOnly:false,MountPath:/hostroot,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-conf-dir,ReadOnly:false,MountPath:/etc/cni/multus/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-daemon-config,ReadOnly:true,MountPath:/etc/cni/net.d/multus.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-multus-certs,ReadOnly:false,MountPath:/etc/cni/multus/certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-kubernetes,ReadOnly:false,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9hf9p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-twlmq_openshift-multus(02bfa799-f281-465d-ab6f-19ea9c16979c): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:52 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:52 crc kubenswrapper[4900]: E0127 12:26:52.733382 4900 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-twlmq" podUID="02bfa799-f281-465d-ab6f-19ea9c16979c" Jan 27 12:26:52 crc kubenswrapper[4900]: W0127 12:26:52.737044 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod206345a2_ea7e_4a32_8c93_414290ba5c92.slice/crio-085b5582bc0b4b7f7ad1520937b7fc246ee2986a7cf371e49863e0a126ecee37 WatchSource:0}: Error finding container 085b5582bc0b4b7f7ad1520937b7fc246ee2986a7cf371e49863e0a126ecee37: Status 404 returned error can't find the container with id 085b5582bc0b4b7f7ad1520937b7fc246ee2986a7cf371e49863e0a126ecee37 Jan 27 12:26:52 crc kubenswrapper[4900]: E0127 12:26:52.739799 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:egress-router-binary-copy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,Command:[/entrypoint/cnibincopy.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/bin/,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:true,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hg9l4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-additional-cni-plugins-lrt6s_openshift-multus(206345a2-ea7e-4a32-8c93-414290ba5c92): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 27 12:26:52 crc kubenswrapper[4900]: E0127 12:26:52.740957 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"egress-router-binary-copy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" podUID="206345a2-ea7e-4a32-8c93-414290ba5c92" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.773442 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dqltj"] Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.774389 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.775933 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.776301 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.776435 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.776481 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.777157 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.778260 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.778498 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.789847 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovn-node-metrics-cert\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.789898 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-ovn\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.789996 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-kubelet\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790034 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-systemd-units\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790070 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-var-lib-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790095 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-etc-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790113 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-script-lib\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790132 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-log-socket\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790152 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h7s6\" (UniqueName: \"kubernetes.io/projected/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-kube-api-access-2h7s6\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790167 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-bin\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790191 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-netns\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790207 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-ovn-kubernetes\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790221 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-netd\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790239 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-env-overrides\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790264 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-config\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790278 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-slash\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790294 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790312 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-systemd\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790329 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-node-log\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.790349 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj"
Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.795972 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\
\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\
",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.812007 4900 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a
784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.823465 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.833199 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.844848 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.855636 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.868744 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.881666 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891011 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h7s6\" (UniqueName: \"kubernetes.io/projected/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-kube-api-access-2h7s6\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891171 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-netns\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891203 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-ovn-kubernetes\") pod \"ovnkube-node-dqltj\" (UID: 
\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891295 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-bin\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891315 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-netns\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891373 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-netd\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891378 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-ovn-kubernetes\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891460 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-netd\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891452 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-env-overrides\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891479 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-bin\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891746 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-config\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891814 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-slash\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: 
I0127 12:26:52.891851 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891885 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-systemd\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891894 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-slash\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891908 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-node-log\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891939 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891963 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-systemd\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.891957 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892006 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892025 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-node-log\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892039 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovn-node-metrics-cert\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892145 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-env-overrides\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892106 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-ovn\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892176 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-ovn\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892224 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-kubelet\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892244 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-systemd-units\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892260 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-var-lib-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892275 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-etc-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892290 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-script-lib\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892311 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-log-socket\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892348 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-kubelet\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892360 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-log-socket\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892360 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-systemd-units\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892383 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-etc-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892382 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-var-lib-openvswitch\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.892407 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-config\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.893073 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-script-lib\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.895428 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovn-node-metrics-cert\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.897278 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.907658 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.911569 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h7s6\" (UniqueName: \"kubernetes.io/projected/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-kube-api-access-2h7s6\") pod \"ovnkube-node-dqltj\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.918241 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.931853 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:52 crc kubenswrapper[4900]: I0127 12:26:52.941149 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.085321 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:26:53 crc kubenswrapper[4900]: W0127 12:26:53.096334 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f2b59d6_a608_43ab_a496_c2d0b46b6c2f.slice/crio-d7e2abfcc9368485b2086b239d1f164898d71197fb9f86ede4ba99cc1a4ffdd6 WatchSource:0}: Error finding container d7e2abfcc9368485b2086b239d1f164898d71197fb9f86ede4ba99cc1a4ffdd6: Status 404 returned error can't find the container with id d7e2abfcc9368485b2086b239d1f164898d71197fb9f86ede4ba99cc1a4ffdd6 Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.098720 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:53 crc kubenswrapper[4900]: init container &Container{Name:kubecfg-setup,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c cat << EOF > /etc/ovn/kubeconfig Jan 27 12:26:53 crc kubenswrapper[4900]: apiVersion: v1 Jan 27 12:26:53 crc kubenswrapper[4900]: clusters: Jan 27 12:26:53 crc kubenswrapper[4900]: - cluster: Jan 27 12:26:53 crc kubenswrapper[4900]: certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt Jan 27 12:26:53 crc kubenswrapper[4900]: server: https://api-int.crc.testing:6443 Jan 27 12:26:53 crc kubenswrapper[4900]: name: default-cluster Jan 27 12:26:53 crc kubenswrapper[4900]: contexts: Jan 27 12:26:53 crc kubenswrapper[4900]: - context: Jan 27 12:26:53 crc kubenswrapper[4900]: cluster: default-cluster Jan 27 12:26:53 crc kubenswrapper[4900]: namespace: default Jan 27 12:26:53 crc kubenswrapper[4900]: user: default-auth Jan 27 12:26:53 crc kubenswrapper[4900]: name: default-context Jan 27 12:26:53 crc kubenswrapper[4900]: current-context: default-context Jan 27 12:26:53 crc kubenswrapper[4900]: kind: Config Jan 27 12:26:53 crc kubenswrapper[4900]: preferences: {} Jan 27 12:26:53 crc kubenswrapper[4900]: users: Jan 27 12:26:53 crc kubenswrapper[4900]: - name: default-auth Jan 27 12:26:53 crc kubenswrapper[4900]: user: Jan 27 12:26:53 crc kubenswrapper[4900]: client-certificate: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem Jan 27 12:26:53 crc kubenswrapper[4900]: client-key: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem Jan 27 12:26:53 crc kubenswrapper[4900]: EOF Jan 27 12:26:53 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-openvswitch,ReadOnly:false,MountPath:/etc/ovn/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2h7s6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovnkube-node-dqltj_openshift-ovn-kubernetes(8f2b59d6-a608-43ab-a496-c2d0b46b6c2f): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:53 crc kubenswrapper[4900]: > 
logger="UnhandledError" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.099927 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubecfg-setup\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.212819 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerStarted","Data":"085b5582bc0b4b7f7ad1520937b7fc246ee2986a7cf371e49863e0a126ecee37"} Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.213543 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"2df2ab19f05d1c3fad431ebab8a8e286ad480aa4e0f858d65500ca1b367492b6"} Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.215001 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:machine-config-daemon,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a,Command:[/usr/bin/machine-config-daemon],Args:[start --payload-version=4.18.1],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:health,HostPort:8798,ContainerPort:8798,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:rootfs,ReadOnly:false,MountPath:/rootfs,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-664p6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/health,Port:{0 8798 },Host:127.0.0.1,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:120,TimeoutSeconds:1,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.215224 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-twlmq" event={"ID":"02bfa799-f281-465d-ab6f-19ea9c16979c","Type":"ContainerStarted","Data":"afb9d0d107c2cd3397ade5f06e50d365543d09c56f53be5e2a2944ea5d2af108"} Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.215344 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:egress-router-binary-copy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,Command:[/entrypoint/cnibincopy.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/bin/,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:true,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hg9l4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-additional-cni-plugins-lrt6s_openshift-multus(206345a2-ea7e-4a32-8c93-414290ba5c92): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.216345 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"d7e2abfcc9368485b2086b239d1f164898d71197fb9f86ede4ba99cc1a4ffdd6"} Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.216857 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"egress-router-binary-copy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" podUID="206345a2-ea7e-4a32-8c93-414290ba5c92" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.217303 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:53 crc kubenswrapper[4900]: container &Container{Name:kube-multus,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,Command:[/bin/bash -ec --],Args:[MULTUS_DAEMON_OPT="" Jan 27 12:26:53 crc kubenswrapper[4900]: /entrypoint/cnibincopy.sh; exec /usr/src/multus-cni/bin/multus-daemon $MULTUS_DAEMON_OPT Jan 27 12:26:53 crc kubenswrapper[4900]: 
],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/bin/,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_PORT,Value:6443,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_HOST,Value:api-int.crc.testing,ValueFrom:nil,},EnvVar{Name:MULTUS_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:K8S_NODE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:false,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:system-cni-dir,ReadOnly:false,MountPath:/host/etc/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-cni-dir,ReadOnly:false,MountPath:/host/run/multus/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-socket-dir-parent,ReadOnly:false,MountPath:/host/run/multus,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-k8s-cni-cncf-io,ReadOnly:false,MountPath:/run/k8s.cni.cncf.io,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-netns,ReadOnly:false,MountPath:/run/netns,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-bin,ReadOnly:false,MountPath:/var/lib/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-multus,ReadOnly:false,MountPath:/var/lib/cni/multus,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-kubelet,ReadOnly:false,MountPath:/var/lib/kubelet,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:hostroot,ReadOnly:false,MountPath:/hostroot,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-conf-dir,ReadOnly:false,MountPath:/etc/cni/multus/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-daemon-config,ReadOnly:true,MountPath:/etc/cni/net.d/multus.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-multus-certs,ReadOnly:false,MountPath:/etc/cni/multus/certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-kubernetes,ReadOnly:false,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9hf9p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,Re
cursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-twlmq_openshift-multus(02bfa799-f281-465d-ab6f-19ea9c16979c): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:53 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.217650 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,Command:[],Args:[--secure-listen-address=0.0.0.0:9001 --config-file=/etc/kube-rbac-proxy/config-file.yaml --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 --tls-min-version=VersionTLS12 --upstream=http://127.0.0.1:8797 --logtostderr=true --tls-cert-file=/etc/tls/private/tls.crt --tls-private-key-file=/etc/tls/private/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:9001,ContainerPort:9001,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:proxy-tls,ReadOnly:false,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:mcd-auth-proxy-config,ReadOnly:false,MountPath:/etc/kube-rbac-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-664p6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.218378 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:53 crc kubenswrapper[4900]: init container &Container{Name:kubecfg-setup,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash 
-c cat << EOF > /etc/ovn/kubeconfig Jan 27 12:26:53 crc kubenswrapper[4900]: apiVersion: v1 Jan 27 12:26:53 crc kubenswrapper[4900]: clusters: Jan 27 12:26:53 crc kubenswrapper[4900]: - cluster: Jan 27 12:26:53 crc kubenswrapper[4900]: certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt Jan 27 12:26:53 crc kubenswrapper[4900]: server: https://api-int.crc.testing:6443 Jan 27 12:26:53 crc kubenswrapper[4900]: name: default-cluster Jan 27 12:26:53 crc kubenswrapper[4900]: contexts: Jan 27 12:26:53 crc kubenswrapper[4900]: - context: Jan 27 12:26:53 crc kubenswrapper[4900]: cluster: default-cluster Jan 27 12:26:53 crc kubenswrapper[4900]: namespace: default Jan 27 12:26:53 crc kubenswrapper[4900]: user: default-auth Jan 27 12:26:53 crc kubenswrapper[4900]: name: default-context Jan 27 12:26:53 crc kubenswrapper[4900]: current-context: default-context Jan 27 12:26:53 crc kubenswrapper[4900]: kind: Config Jan 27 12:26:53 crc kubenswrapper[4900]: preferences: {} Jan 27 12:26:53 crc kubenswrapper[4900]: users: Jan 27 12:26:53 crc kubenswrapper[4900]: - name: default-auth Jan 27 12:26:53 crc kubenswrapper[4900]: user: Jan 27 12:26:53 crc kubenswrapper[4900]: client-certificate: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem Jan 27 12:26:53 crc kubenswrapper[4900]: client-key: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem Jan 27 12:26:53 crc kubenswrapper[4900]: EOF Jan 27 12:26:53 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-openvswitch,ReadOnly:false,MountPath:/etc/ovn/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2h7s6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovnkube-node-dqltj_openshift-ovn-kubernetes(8f2b59d6-a608-43ab-a496-c2d0b46b6c2f): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:53 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.219374 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"machine-config-daemon\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.219407 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-twlmq" podUID="02bfa799-f281-465d-ab6f-19ea9c16979c" Jan 27 12:26:53 crc 
kubenswrapper[4900]: E0127 12:26:53.219437 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubecfg-setup\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.229789 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.240940 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.259260 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.270458 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.279871 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.287701 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.298710 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.310170 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.320902 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.334225 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.349592 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.360882 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.370458 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.381371 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.391264 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.396176 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.396302 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.396339 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.396421 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:26:57.396382963 +0000 UTC m=+44.633411173 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.396440 4900 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.396512 4900 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.396517 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:57.396499607 +0000 UTC m=+44.633527847 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.396605 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:57.396585189 +0000 UTC m=+44.633613409 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.403273 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.412283 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.427021 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.436294 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.447605 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.457099 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.466586 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.474714 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.481690 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.481736 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.481792 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.481845 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.481955 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.482050 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.484524 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.495032 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.511797 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.602681 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.602729 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.602821 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.602855 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.602869 4900 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:53 crc 
kubenswrapper[4900]: E0127 12:26:53.602827 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.602920 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:57.602902734 +0000 UTC m=+44.839930944 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.602925 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.602946 4900 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:53 crc kubenswrapper[4900]: E0127 12:26:53.602987 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 12:26:57.602977516 +0000 UTC m=+44.840005726 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:53 crc kubenswrapper[4900]: I0127 12:26:53.613523 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 15:39:54.780104965 +0000 UTC Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.613959 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 00:36:39.184035282 +0000 UTC Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.867365 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-5s5j4"] Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.868519 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.871349 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.874774 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.874847 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.874917 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.889179 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.902617 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.917020 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.930600 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.942136 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.947473 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b6d0716b-f743-4acf-bf23-060e177011ee-serviceca\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.947522 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b6d0716b-f743-4acf-bf23-060e177011ee-host\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.947607 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lcvx\" (UniqueName: \"kubernetes.io/projected/b6d0716b-f743-4acf-bf23-060e177011ee-kube-api-access-2lcvx\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.958648 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.977953 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:54 crc kubenswrapper[4900]: I0127 12:26:54.994624 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.008612 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.030582 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.045111 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.048716 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/b6d0716b-f743-4acf-bf23-060e177011ee-serviceca\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.048771 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b6d0716b-f743-4acf-bf23-060e177011ee-host\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.048823 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lcvx\" (UniqueName: \"kubernetes.io/projected/b6d0716b-f743-4acf-bf23-060e177011ee-kube-api-access-2lcvx\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.048901 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b6d0716b-f743-4acf-bf23-060e177011ee-host\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.050453 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b6d0716b-f743-4acf-bf23-060e177011ee-serviceca\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.061560 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.071903 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lcvx\" (UniqueName: \"kubernetes.io/projected/b6d0716b-f743-4acf-bf23-060e177011ee-kube-api-access-2lcvx\") pod \"node-ca-5s5j4\" (UID: \"b6d0716b-f743-4acf-bf23-060e177011ee\") " pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.077078 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.095712 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.183242 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-5s5j4" Jan 27 12:26:55 crc kubenswrapper[4900]: E0127 12:26:55.200508 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:55 crc kubenswrapper[4900]: container &Container{Name:node-ca,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f,Command:[/bin/sh -c trap 'jobs -p | xargs -r kill; echo shutting down node-ca; exit 0' TERM Jan 27 12:26:55 crc kubenswrapper[4900]: while [ true ]; Jan 27 12:26:55 crc kubenswrapper[4900]: do Jan 27 12:26:55 crc kubenswrapper[4900]: for f in $(ls /tmp/serviceca); do Jan 27 12:26:55 crc kubenswrapper[4900]: echo $f Jan 27 12:26:55 crc kubenswrapper[4900]: ca_file_path="/tmp/serviceca/${f}" Jan 27 12:26:55 crc kubenswrapper[4900]: f=$(echo $f | sed -r 's/(.*)\.\./\1:/') Jan 27 12:26:55 crc kubenswrapper[4900]: reg_dir_path="/etc/docker/certs.d/${f}" Jan 27 12:26:55 crc kubenswrapper[4900]: if [ -e "${reg_dir_path}" ]; then Jan 27 12:26:55 crc kubenswrapper[4900]: cp -u $ca_file_path $reg_dir_path/ca.crt Jan 27 12:26:55 crc kubenswrapper[4900]: else Jan 27 12:26:55 crc kubenswrapper[4900]: mkdir $reg_dir_path Jan 27 12:26:55 crc kubenswrapper[4900]: cp $ca_file_path $reg_dir_path/ca.crt Jan 27 12:26:55 crc kubenswrapper[4900]: fi Jan 27 12:26:55 crc kubenswrapper[4900]: done Jan 27 12:26:55 crc kubenswrapper[4900]: for d in $(ls /etc/docker/certs.d); do Jan 27 12:26:55 crc kubenswrapper[4900]: echo $d Jan 27 12:26:55 crc kubenswrapper[4900]: dp=$(echo $d | sed -r 's/(.*):/\1\.\./') Jan 27 12:26:55 crc kubenswrapper[4900]: reg_conf_path="/tmp/serviceca/${dp}" Jan 27 12:26:55 crc kubenswrapper[4900]: if [ ! -e "${reg_conf_path}" ]; then Jan 27 12:26:55 crc kubenswrapper[4900]: rm -rf /etc/docker/certs.d/$d Jan 27 12:26:55 crc kubenswrapper[4900]: fi Jan 27 12:26:55 crc kubenswrapper[4900]: done Jan 27 12:26:55 crc kubenswrapper[4900]: sleep 60 & wait ${!} Jan 27 12:26:55 crc kubenswrapper[4900]: done Jan 27 12:26:55 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{10485760 0} {} 10Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:serviceca,ReadOnly:false,MountPath:/tmp/serviceca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host,ReadOnly:false,MountPath:/etc/docker/certs.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2lcvx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*1001,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*0,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-ca-5s5j4_openshift-image-registry(b6d0716b-f743-4acf-bf23-060e177011ee): CreateContainerConfigError: services have not yet been read at least once, cannot 
construct envvars Jan 27 12:26:55 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:55 crc kubenswrapper[4900]: E0127 12:26:55.201939 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"node-ca\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-image-registry/node-ca-5s5j4" podUID="b6d0716b-f743-4acf-bf23-060e177011ee" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.223078 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-5s5j4" event={"ID":"b6d0716b-f743-4acf-bf23-060e177011ee","Type":"ContainerStarted","Data":"4a920c21fd9213e4cfb146bd398661fec7a83458b85cc4642124af3eac54d815"} Jan 27 12:26:55 crc kubenswrapper[4900]: E0127 12:26:55.224760 4900 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 27 12:26:55 crc kubenswrapper[4900]: container &Container{Name:node-ca,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f,Command:[/bin/sh -c trap 'jobs -p | xargs -r kill; echo shutting down node-ca; exit 0' TERM Jan 27 12:26:55 crc kubenswrapper[4900]: while [ true ]; Jan 27 12:26:55 crc kubenswrapper[4900]: do Jan 27 12:26:55 crc kubenswrapper[4900]: for f in $(ls /tmp/serviceca); do Jan 27 12:26:55 crc kubenswrapper[4900]: echo $f Jan 27 12:26:55 crc kubenswrapper[4900]: ca_file_path="/tmp/serviceca/${f}" Jan 27 12:26:55 crc kubenswrapper[4900]: f=$(echo $f | sed -r 's/(.*)\.\./\1:/') Jan 27 12:26:55 crc kubenswrapper[4900]: reg_dir_path="/etc/docker/certs.d/${f}" Jan 27 12:26:55 crc kubenswrapper[4900]: if [ -e "${reg_dir_path}" ]; then Jan 27 12:26:55 crc kubenswrapper[4900]: cp -u $ca_file_path $reg_dir_path/ca.crt Jan 27 12:26:55 crc kubenswrapper[4900]: else Jan 27 12:26:55 crc kubenswrapper[4900]: mkdir $reg_dir_path Jan 27 12:26:55 crc kubenswrapper[4900]: cp $ca_file_path $reg_dir_path/ca.crt Jan 27 12:26:55 crc kubenswrapper[4900]: fi Jan 27 12:26:55 crc kubenswrapper[4900]: done Jan 27 12:26:55 crc kubenswrapper[4900]: for d in $(ls /etc/docker/certs.d); do Jan 27 12:26:55 crc kubenswrapper[4900]: echo $d Jan 27 12:26:55 crc kubenswrapper[4900]: dp=$(echo $d | sed -r 's/(.*):/\1\.\./') Jan 27 12:26:55 crc kubenswrapper[4900]: reg_conf_path="/tmp/serviceca/${dp}" Jan 27 12:26:55 crc kubenswrapper[4900]: if [ ! 
-e "${reg_conf_path}" ]; then Jan 27 12:26:55 crc kubenswrapper[4900]: rm -rf /etc/docker/certs.d/$d Jan 27 12:26:55 crc kubenswrapper[4900]: fi Jan 27 12:26:55 crc kubenswrapper[4900]: done Jan 27 12:26:55 crc kubenswrapper[4900]: sleep 60 & wait ${!} Jan 27 12:26:55 crc kubenswrapper[4900]: done Jan 27 12:26:55 crc kubenswrapper[4900]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{10485760 0} {} 10Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:serviceca,ReadOnly:false,MountPath:/tmp/serviceca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host,ReadOnly:false,MountPath:/etc/docker/certs.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2lcvx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*1001,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*0,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-ca-5s5j4_openshift-image-registry(b6d0716b-f743-4acf-bf23-060e177011ee): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 27 12:26:55 crc kubenswrapper[4900]: > logger="UnhandledError" Jan 27 12:26:55 crc kubenswrapper[4900]: E0127 12:26:55.226629 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"node-ca\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-image-registry/node-ca-5s5j4" podUID="b6d0716b-f743-4acf-bf23-060e177011ee" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.232454 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.243854 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.253783 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.263769 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.271154 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.281927 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.293022 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.302851 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.312747 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.327511 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.334912 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.345444 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.362972 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.375917 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.481101 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.481205 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.481288 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:55 crc kubenswrapper[4900]: E0127 12:26:55.481415 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:26:55 crc kubenswrapper[4900]: E0127 12:26:55.481711 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:26:55 crc kubenswrapper[4900]: E0127 12:26:55.482013 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.576162 4900 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 27 12:26:55 crc kubenswrapper[4900]: W0127 12:26:55.576564 4900 reflector.go:484] object-"openshift-image-registry"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 27 12:26:55 crc kubenswrapper[4900]: W0127 12:26:55.576650 4900 reflector.go:484] object-"openshift-image-registry"/"node-ca-dockercfg-4777p": watch of *v1.Secret ended with: very short watch: object-"openshift-image-registry"/"node-ca-dockercfg-4777p": Unexpected watch close - watch lasted less than a second and no items received Jan 27 12:26:55 crc kubenswrapper[4900]: W0127 12:26:55.576650 4900 reflector.go:484] object-"openshift-image-registry"/"image-registry-certificates": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"image-registry-certificates": Unexpected watch close - watch lasted less than a second and no items received Jan 27 12:26:55 crc kubenswrapper[4900]: W0127 12:26:55.577531 4900 reflector.go:484] object-"openshift-image-registry"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 27 12:26:55 crc kubenswrapper[4900]: I0127 12:26:55.614594 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 09:26:45.946079363 +0000 UTC Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.496217 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.508682 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.524884 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.559812 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.572869 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.577636 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.598023 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.598885 4900 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.602247 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.602302 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.602313 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.602562 4900 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.612890 4900 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.613221 4900 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.613427 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.614796 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 03:50:29.406870467 +0000 UTC
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.615601 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.615731 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.615852 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.615934 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.616024 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:56Z","lastTransitionTime":"2026-01-27T12:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.639517 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: E0127 12:26:56.639845 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.645614 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.645737 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.645796 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.645856 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.645938 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:56Z","lastTransitionTime":"2026-01-27T12:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.656027 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.657867 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: E0127 12:26:56.669166 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.673856 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.673897 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.673909 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.673927 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.673939 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:56Z","lastTransitionTime":"2026-01-27T12:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.681043 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: E0127 12:26:56.684416 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.688642 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.688675 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.688684 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.688700 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.688711 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:56Z","lastTransitionTime":"2026-01-27T12:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.690022 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:26:56 crc kubenswrapper[4900]: E0127 12:26:56.698835 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.701458 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.714870 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.719789 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.719835 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.719850 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.719869 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.719880 4900 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:56Z","lastTransitionTime":"2026-01-27T12:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:56 crc kubenswrapper[4900]: E0127 12:26:56.732568 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: E0127 12:26:56.732696 4900 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.732709 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.734853 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.734900 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.734920 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.734945 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.734970 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:56Z","lastTransitionTime":"2026-01-27T12:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.776777 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.838261 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.838681 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.838855 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.838985 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.839209 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:56Z","lastTransitionTime":"2026-01-27T12:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.897003 4900 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.942357 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.942403 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.942411 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.942427 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.942467 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:56Z","lastTransitionTime":"2026-01-27T12:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:56 crc kubenswrapper[4900]: I0127 12:26:56.968078 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.045263 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.045326 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.045345 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.045361 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.045381 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.149284 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.149343 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.149355 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.149373 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.149383 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.253670 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.253711 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.253722 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.253739 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.253750 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.355871 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.355910 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.355918 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.355932 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.355944 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.455713 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.456885 4900 scope.go:117] "RemoveContainer" containerID="1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b" Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.457199 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.459384 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.459439 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.459451 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.459471 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.459485 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.481746 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.481930 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.482172 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.482339 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.482659 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.482829 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.494448 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.494616 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.494682 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.494838 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:05.494794764 +0000 UTC m=+52.731822984 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.494893 4900 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.494895 4900 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.494992 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-27 12:27:05.494968389 +0000 UTC m=+52.731996599 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.495030 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:05.495005 +0000 UTC m=+52.732033200 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.579222 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.579270 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.579280 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.579303 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.579313 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
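
The "durationBeforeRetry 8s" entries are consistent with an exponential per-volume backoff; assuming a 500ms initial delay doubling toward a ceiling of roughly two minutes (the real constants live in kubelet's nestedpendingoperations, which I have not reproduced here), an 8s gap would correspond to the fifth consecutive failure:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // assumed initial per-volume backoff
	const ceiling = 2*time.Minute + 2*time.Second
	for failure := 1; failure <= 9; failure++ {
		fmt.Printf("failure %d -> durationBeforeRetry %v\n", failure, delay)
		delay *= 2 // 500ms, 1s, 2s, 4s, 8s, ... matching the logged 8s at step 5
		if delay > ceiling {
			delay = ceiling
		}
	}
}
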
Has your network provider started?"} Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.615015 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 13:54:26.00117795 +0000 UTC Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.718843 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.718904 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.719232 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.719241 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.719312 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.719335 4900 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.719273 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.719440 4900 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.719477 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:05.719443268 +0000 UTC m=+52.956471508 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:57 crc kubenswrapper[4900]: E0127 12:26:57.719530 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:05.71951235 +0000 UTC m=+52.956540560 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.719703 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.719774 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.719787 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.719820 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.719833 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.822754 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.822794 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.822806 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.822824 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.822835 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.925279 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.925331 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.925342 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.925360 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:57 crc kubenswrapper[4900]: I0127 12:26:57.925379 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:57Z","lastTransitionTime":"2026-01-27T12:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.028858 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.028904 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.028920 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.028936 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.028961 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.131381 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.131427 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.131438 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.131458 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.131471 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.233947 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.233988 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.233997 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.234012 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.234023 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.376319 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.376403 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.376417 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.376444 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.376458 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.497563 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.497617 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.497637 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.497658 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.497670 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.601321 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.601390 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.601402 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.601458 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.601480 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.615548 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 16:19:42.069219043 +0000 UTC Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.708427 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.708485 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.708498 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.708515 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.708529 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.811745 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.811788 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.811806 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.811822 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.811834 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.914094 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.914147 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.914159 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.914187 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:58 crc kubenswrapper[4900]: I0127 12:26:58.914201 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:58Z","lastTransitionTime":"2026-01-27T12:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.017221 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.017298 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.017316 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.017343 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.017361 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.121217 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.121273 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.121289 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.121314 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.121326 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.223748 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.223990 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.224067 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.224145 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.224217 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.326705 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.326763 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.326777 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.326795 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.326815 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.434027 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.434100 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.434111 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.434127 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.434139 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.480858 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.480899 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.480922 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:26:59 crc kubenswrapper[4900]: E0127 12:26:59.481114 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:26:59 crc kubenswrapper[4900]: E0127 12:26:59.481216 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:26:59 crc kubenswrapper[4900]: E0127 12:26:59.481363 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.536758 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.536794 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.536802 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.536819 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.536831 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.616204 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 14:42:23.564256013 +0000 UTC Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.639430 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.639477 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.639492 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.639510 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.639522 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.742576 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.742618 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.742631 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.742650 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.742663 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.845200 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.845261 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.845274 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.845291 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.845304 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.948741 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.948797 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.948812 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.948834 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:26:59 crc kubenswrapper[4900]: I0127 12:26:59.948849 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:26:59Z","lastTransitionTime":"2026-01-27T12:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.051179 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.051224 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.051239 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.051267 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.051283 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.154240 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.154317 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.154342 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.154378 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.154401 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.256804 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.256849 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.256861 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.256875 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.256884 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.359774 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.359812 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.359821 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.359840 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.359849 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.463071 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.463113 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.463122 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.463144 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.463155 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.566956 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.566994 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.567003 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.567020 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.567031 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.617285 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 03:12:30.922064327 +0000 UTC Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.669453 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.669506 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.669528 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.669546 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.669557 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.772123 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.772168 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.772176 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.772191 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.772199 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.875782 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.875830 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.875841 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.875858 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.875868 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.979100 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.979162 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.979181 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.979198 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:00 crc kubenswrapper[4900]: I0127 12:27:00.979211 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:00Z","lastTransitionTime":"2026-01-27T12:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087267 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087335 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087351 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087371 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087382 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087267 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087335 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087351 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087371 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.087382 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.198351 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.198412 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.198611 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.198644 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.198667 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.302665 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.302722 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.302739 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.302785 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.302816 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.406168 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.406227 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.406239 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.406264 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.406280 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.481774 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.481866 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.481769 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 12:27:01 crc kubenswrapper[4900]: E0127 12:27:01.482248 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 12:27:01 crc kubenswrapper[4900]: E0127 12:27:01.482311 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 12:27:01 crc kubenswrapper[4900]: E0127 12:27:01.482396 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
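Every "network is not ready" record above traces back to the same check: the container runtime found no CNI configuration file in /etc/kubernetes/cni/net.d/, so NetworkReady stays false and pod sandboxes cannot be created. A rough Go sketch of that readiness test follows, assuming the conventional CNI config extensions (.conf, .conflist, .json); the real check lives in the runtime's CNI plumbing, so this is an approximation for diagnosis, not the actual implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// confDir is the directory named in the error messages above.
const confDir = "/etc/kubernetes/cni/net.d"

// cniConfigPresent reports whether the directory contains any file with a
// conventional CNI config extension.
func cniConfigPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // conventional CNI extensions (assumption)
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigPresent(confDir)
	if err != nil || !ok {
		fmt.Println("NetworkReady=false: no CNI configuration file found; has the network provider started?")
		return
	}
	fmt.Println("NetworkReady=true")
}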
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.514258 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.514311 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.514321 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.514340 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.514354 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.616252 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.616297 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.616317 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.616331 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.616342 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.618397 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 22:06:28.84265208 +0000 UTC
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.720783 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.720848 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.720861 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.720877 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.720888 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.823624 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.823671 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.823681 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.823697 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.823710 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.927873 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.927947 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.927968 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.928004 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:01 crc kubenswrapper[4900]: I0127 12:27:01.928038 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:01Z","lastTransitionTime":"2026-01-27T12:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
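The two certificate_manager.go:356 records above report the same kubelet-serving certificate (expiring 2026-02-24 05:53:03 UTC) with two different rotation deadlines, first 2025-11-12 and then 2025-11-17. That is consistent with how, as I understand it, the upstream client-go certificate manager works: each pass re-draws the deadline as a jittered point late in the certificate's validity window. A sketch of that idea; the 0.7-0.9 band and the one-year validity are illustrative assumptions, not quotes of the real constants:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point late in the certificate's validity
// window, approximating the kubelet certificate manager's jitter. The
// 0.7-0.9 fraction band is an assumption for illustration.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	fraction := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(float64(total) * fraction))
}

func main() {
	// Expiration taken from the log records above; issuance time is assumed.
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
	notBefore := notAfter.AddDate(-1, 0, 0) // assumed one-year validity
	// Two draws yield two different deadlines, matching the two log records.
	for i := 0; i < 2; i++ {
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
	}
}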
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.032025 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.032125 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.032145 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.032169 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.032184 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.136348 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.136407 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.136422 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.136453 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.136471 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.240180 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.240230 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.240245 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.240264 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.240278 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.254978 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c"} Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.272432 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.286221 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.297384 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read 
at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.310732 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.324897 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.338704 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.342612 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.342646 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.342669 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.342696 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.342710 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.446526 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.446602 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.446617 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.446636 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.446650 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.478631 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.488931 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.501819 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.512588 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.525790 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: 
connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.548458 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\
\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.549572 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.549626 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.549640 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.549658 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.549670 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.558489 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.569469 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.639975 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 23:03:46.471278686 +0000 UTC Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.652400 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.652452 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.652465 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.652490 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.652506 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file 
in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.755839 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.755941 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.755961 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.756007 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.756104 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.858991 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.859050 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.859080 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.859104 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.859118 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.961799 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.961886 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.961904 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.961926 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:02 crc kubenswrapper[4900]: I0127 12:27:02.961941 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:02Z","lastTransitionTime":"2026-01-27T12:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.064380 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.064480 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.064493 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.064509 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.064520 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.167924 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.167975 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.168000 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.168021 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.168034 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.277426 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.277493 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.277506 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.277530 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.277544 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.379724 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.379774 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.379786 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.379802 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.379814 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.480742 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.480774 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.480837 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 12:27:03 crc kubenswrapper[4900]: E0127 12:27:03.481091 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 27 12:27:03 crc kubenswrapper[4900]: E0127 12:27:03.481294 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 27 12:27:03 crc kubenswrapper[4900]: E0127 12:27:03.480930 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.483188 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.483255 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.483278 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.483302 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.483315 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.588134 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.588191 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.588214 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.588235 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.588252 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.640823 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 14:54:00.280191498 +0000 UTC
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.691083 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.691137 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.691154 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.691172 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.691185 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.793834 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.793883 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.793893 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.793910 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.793921 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.896784 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.896823 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.896835 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.896851 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.896863 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.999893 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.999944 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:03 crc kubenswrapper[4900]: I0127 12:27:03.999956 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:03.999976 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:03.999993 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:03Z","lastTransitionTime":"2026-01-27T12:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.102828 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.102872 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.102883 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.102899 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.102911 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.208103 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.208153 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.208165 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.208185 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.208198 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.310456 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.310501 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.310514 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.310533 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.310549 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.412458 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.412497 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.412512 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.412529 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.412540 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.541099 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.541133 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.541142 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.541163 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.541179 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.595752 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27"]
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.596689 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.601869 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.610627 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.636313 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.641044 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 17:22:15.892276127 +0000 UTC Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.647131 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.656152 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:04 
crc kubenswrapper[4900]: I0127 12:27:04.656185 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.656196 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.656213 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.656230 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.662652 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.685598 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.695924 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2tw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.710048 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.725284 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.729171 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.729583 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.729709 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.729817 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxbkk\" (UniqueName: \"kubernetes.io/projected/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-kube-api-access-zxbkk\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.742349 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.753585 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.758345 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.758387 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.758398 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.758414 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.758427 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.767569 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.783395 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.807524 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.825115 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.830771 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxbkk\" (UniqueName: \"kubernetes.io/projected/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-kube-api-access-zxbkk\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.830830 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.830851 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.830879 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.831475 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.831506 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.835576 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.846748 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.850005 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxbkk\" (UniqueName: \"kubernetes.io/projected/79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee-kube-api-access-zxbkk\") pod \"ovnkube-control-plane-749d76644c-2tw27\" (UID: \"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.861163 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.861198 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.861207 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.861221 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.861231 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.868008 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.965530 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.965577 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.965586 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.965603 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:04 crc kubenswrapper[4900]: I0127 12:27:04.965613 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:04Z","lastTransitionTime":"2026-01-27T12:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.068519 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.068574 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.068583 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.068607 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.068633 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:05Z","lastTransitionTime":"2026-01-27T12:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.129328 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.174992 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.175075 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.175093 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.175111 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.175120 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:05Z","lastTransitionTime":"2026-01-27T12:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.264714 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" event={"ID":"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee","Type":"ContainerStarted","Data":"0bfb327a320d17947038520a6f132f26ba40e5405c991ee4d9b28370ac27ab32"}
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.265632 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-22mkn" event={"ID":"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978","Type":"ContainerStarted","Data":"d4f3dd4dc37398d34df5fd18b646a2c385ebe3c5ba0545c4e592182b248e98b4"}
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.267702 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66" exitCode=0
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.267766 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66"}
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.279990 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.280090 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.280105 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.280136 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.280149 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:05Z","lastTransitionTime":"2026-01-27T12:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.288200 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.298796 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.314743 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.337985 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.349011 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2tw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.361629 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.376746 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.384691 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.384741 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.384754 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.384771 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.384783 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:05Z","lastTransitionTime":"2026-01-27T12:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.390774 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.404210 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4f3dd4dc37398d34df5fd18b646a2c385ebe3c5ba0545c4e592182b248e98b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.421454 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.433123 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.449786 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.465742 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743:
connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.485227 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\
\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.498873 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.499025 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.499084 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.499216 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.499330 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.499343 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.499483 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-27 12:27:21.499454737 +0000 UTC m=+68.736482947 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.499589 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.499667 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.499813 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.499853 4900 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.499950 4900 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.499992 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:21.499980003 +0000 UTC m=+68.737008213 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.500014 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:21.500005783 +0000 UTC m=+68.737033993 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.501510 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.501548 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.501558 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.501577 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.501672 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:05Z","lastTransitionTime":"2026-01-27T12:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.510310 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/opens
hift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.523527 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.533415 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.545321 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.552691 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4f3dd4dc37398d34df5fd18b646a2c385ebe3c5ba0545c4e592182b248e98b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc 
kubenswrapper[4900]: I0127 12:27:05.562237 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.574805 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.585251 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.597281 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.605460 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.605489 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.605498 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.605511 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.605522 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:05Z","lastTransitionTime":"2026-01-27T12:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.612228 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.631204 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.641768 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 08:10:53.109181125 +0000 UTC Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.645264 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.659255 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.671773 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.684579 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.695645 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2tw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.707504 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-7gwzm"] Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.707967 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.707990 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.708036 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.707996 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.708090 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.708101 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.708110 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:05Z","lastTransitionTime":"2026-01-27T12:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.719113 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.732309 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.742980 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7gwzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6e155fd-bee9-4c32-9919-0dbee597003e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7gwzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.751558 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2tw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.764117 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.777596 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.786585 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.797439 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.802613 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zphp8\" (UniqueName: \"kubernetes.io/projected/b6e155fd-bee9-4c32-9919-0dbee597003e-kube-api-access-zphp8\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.802687 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.802732 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.802770 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.802993 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.803033 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.803035 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.803121 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.803144 4900 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.803077 4900 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.803302 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:21.803277288 +0000 UTC m=+69.040305648 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.803353 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:21.803319829 +0000 UTC m=+69.040348239 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.806381 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4f3dd4dc37398d34df5fd18b646a2c385ebe3c5ba0545c4e592182b248e98b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.901563 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.903023 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zphp8\" (UniqueName: \"kubernetes.io/projected/b6e155fd-bee9-4c32-9919-0dbee597003e-kube-api-access-zphp8\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.903152 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:05 crc kubenswrapper[4900]: E0127 12:27:05.903311 4900 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:05 crc 
kubenswrapper[4900]: E0127 12:27:05.903369 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs podName:b6e155fd-bee9-4c32-9919-0dbee597003e nodeName:}" failed. No retries permitted until 2026-01-27 12:27:06.403353209 +0000 UTC m=+53.640381419 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs") pod "network-metrics-daemon-7gwzm" (UID: "b6e155fd-bee9-4c32-9919-0dbee597003e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.903384 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.903413 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.903424 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.903445 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.903459 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:05Z","lastTransitionTime":"2026-01-27T12:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.932001 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:05 crc kubenswrapper[4900]: I0127 12:27:05.944010 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zphp8\" (UniqueName: \"kubernetes.io/projected/b6e155fd-bee9-4c32-9919-0dbee597003e-kube-api-access-zphp8\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.006419 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.006447 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.089038 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.089192 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.089217 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.089434 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.103135 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.115210 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.118597 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.131661 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\"
:true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.145248 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with 
unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.158980 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.174170 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.190192 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.192572 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.192620 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.192635 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.192654 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.192669 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.202575 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7gwzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6e155fd-bee9-4c32-9919-0dbee597003e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7gwzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.267622 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.274573 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"af84d5a9ef98243396aa00595710687c96c30c6ef36cecb724e3d415f945c8bd"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.274670 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.276490 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" event={"ID":"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee","Type":"ContainerStarted","Data":"72d09522172a2d12c0f19069dfc65cf6d492b1c22faed0a6cee43041b76b99b3"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.276581 4900 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" event={"ID":"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee","Type":"ContainerStarted","Data":"373ece630dc9da05772f8b44c2c153d7605f48f691d7c40deb15658818ffc463"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.278736 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"977db28e363d05b897c2144d2eaa3ae9ed3d311da4a6f1c16e2b2fe06a8bc5d1"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.278877 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"60da90add614230d95b112403a98a2405640af883ae37b369043fc310facd9c5"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.287542 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.295701 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.295918 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.295986 4900 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.296080 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.296159 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.299299 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2tw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.314296 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.326069 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5756a6fc-8d7c-45c7-934c-9a9bf452e9b6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e78718c5347036ebcc7ca0f1b05b47f2dfbff5227c83fac9c7dee20c039c65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://236468db6329733ec60fbeb0ef57ec6394097d1d90c2ce041554d1f0c0e05b85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881
c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d0d4e52ceb899e2e715b769eaf9bbd5f7f3335f8cec106af20427a68f06d6cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23ddb4cbbb718a47767021238a8d00ed4855974cc9ee4d1f565bbff5491401b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23ddb4cbbb718a47767021238a8d00ed4855974cc9ee4d1f565bbff5491401b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.339977 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.352219 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.361941 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.370010 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4f3dd4dc37398d34df5fd18b646a2c385ebe3c5ba0545c4e592182b248e98b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc 
kubenswrapper[4900]: I0127 12:27:06.380779 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.392089 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.397895 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.397919 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.397926 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.397940 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.397949 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.402477 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.409218 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:06 crc kubenswrapper[4900]: E0127 12:27:06.409436 4900 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:06 crc kubenswrapper[4900]: E0127 12:27:06.409525 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs podName:b6e155fd-bee9-4c32-9919-0dbee597003e nodeName:}" failed. No retries permitted until 2026-01-27 12:27:07.409504967 +0000 UTC m=+54.646533177 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs") pod "network-metrics-daemon-7gwzm" (UID: "b6e155fd-bee9-4c32-9919-0dbee597003e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.413958 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.435014 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.445279 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af84d5a9ef98243396aa00595710687c96c30c6ef36cecb724e3d415f945c8bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\"
:\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.454939 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.466004 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7gwzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6e155fd-bee9-4c32-9919-0dbee597003e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7gwzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.485841 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.505355 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.517612 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://373ece630dc9da05772f8b44c2c153d7605f48f691d7c40deb15658818ffc463\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://72d09522172a2d12c0f19069dfc65cf6d492b1c22faed0a6cee43041b76b99b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2tw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.520386 4900 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.520444 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.520455 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.520471 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.520482 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.527313 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.548974 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5756a6fc-8d7c-45c7-934c-9a9bf452e9b6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e78718c5347036ebcc7ca0f1b05b47f2dfbff5227c83fac9c7dee20c039c65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://236468db6329733ec60fbeb0ef57ec6394097d1d90c2ce041554d1f0c0e05b85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d0d4e52ceb899e2e715b769eaf9bbd5f7f3335f8cec106af20427a68f06d6cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\"
:\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23ddb4cbbb718a47767021238a8d00ed4855974cc9ee4d1f565bbff5491401b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23ddb4cbbb718a47767021238a8d00ed4855974cc9ee4d1f565bbff5491401b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.567643 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.590455 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.652879 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 02:18:26.689692488 +0000 UTC Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.655696 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.655726 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.655735 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.655747 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.655757 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.663395 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.675904 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4f3dd4dc37398d34df5fd18b646a2c385ebe3c5ba0545c4e592182b248e98b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.687904 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.702019 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.715259 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.733441 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: 
connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.753149 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"
state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578
f37b66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.759476 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.759525 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.759537 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.759555 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.759566 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.770043 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.781163 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.802791 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.815372 
4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.827739 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.838083 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7gwzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6e155fd-bee9-4c32-9919-0dbee597003e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7gwzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.850366 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af84d5a9ef98243396aa00595710687c96c30c6ef36cecb724e3d415f945c8bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.863820 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc 
kubenswrapper[4900]: I0127 12:27:06.863860 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.863869 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.863890 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.863901 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.897408 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.917893 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.938090 4900 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://373ece630dc9da05772f8b44c2c153d7605f48f691d7c40deb15658818ffc463\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://72d09522172a2d12c0f19069dfc65cf6d492b1c22faed0a6cee43041b76b99b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2tw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 
12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.955787 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.965617 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.965667 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.965679 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.965696 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.965706 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:06Z","lastTransitionTime":"2026-01-27T12:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:06 crc kubenswrapper[4900]: I0127 12:27:06.995902 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.034387 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.069134 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.069460 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.069474 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.069495 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.069509 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.073556 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.073662 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.073684 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.073715 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.073730 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.075847 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.083780 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.087748 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.087817 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.087833 4900 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.087857 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.087871 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.100186 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.104387 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.104427 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.104439 4900 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.104457 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.104466 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.113050 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4f3dd4dc37398d34df5fd18b646a2c385ebe3c5ba0545c4e592182b248e98b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.115951 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.119767 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.119810 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.119822 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.119838 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.119848 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.128915 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.132293 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.132321 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.132331 4900 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.132351 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.132362 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.143154 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9d7606d1-10b8-4621-b721-e2ce018de34c\\\",\\\"systemUUID\\\":\\\"95da1c6e-623c-4d7b-a8e5-ff27d2b5f353\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.143290 4900 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.153681 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.172671 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.172724 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.172735 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.172753 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.172764 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.196252 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5756a6fc-8d7c-45c7-934c-9a9bf452e9b6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e78718c5347036ebcc7ca0f1b05b47f2dfbff5227c83fac9c7dee20c039c65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://236468db6329733ec60fbeb0ef57ec6394097d1d90c2ce041554d1f0c0e05b85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d0d4e52ceb899e2e715b769eaf9bbd5f7f3335f8cec106af20427a68f06d6cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23ddb4cbbb718a47767021238a8d00ed4855974cc9ee4d1f565bbff5491401b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23ddb4cbbb718a47767021238a8d00ed4855974cc9ee4d1f565bbff5491401b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.275143 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.275179 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.275188 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.275201 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.275212 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.285116 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"f2d5441aa7877eac34cbb8ff8d85aea47a33853940815aae71cccd0c375c0f0c"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.285162 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"3f5fb675be3f19a0f06218c3aefc687f195b3c99c2df38d78b9bd4b3e15b5264"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.285173 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"03cd682fbafb58d28a6ce344c4ae0452c421e5adeea277ce9a2f43ae3739f5a9"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.285182 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"767483b4ee320dae20ff9a4ad30712ab30ded730d25f7ba2a3cdc94007c8d98d"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.286907 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"258a3e6116407d233fa6f12c7191e2c409819516208c7d894d39b6dff15a34dd"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.286954 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c367f1c091a4089199dab2cd57f0cfc0e168be0d9a53e6a985b45843aa8d13a7"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.298844 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-twlmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02bfa799-f281-465d-ab6f-19ea9c16979c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9hf9p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-twlmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.309032 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7gwzm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6e155fd-bee9-4c32-9919-0dbee597003e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zphp8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7gwzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.319910 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2f2c6408-cc23-4b42-92ba-ef08be13637b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af84d5a9ef98243396aa00595710687c96c30c6ef36cecb724e3d415f945c8bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-664p6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:51Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2pp6x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.357186 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0127 12:26:42.122798 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0127 12:26:42.124112 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-405323390/tls.crt::/tmp/serving-cert-405323390/tls.key\\\\\\\"\\\\nI0127 12:26:49.597395 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0127 12:26:49.627247 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0127 12:26:49.627284 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0127 12:26:49.627335 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0127 12:26:49.627343 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0127 12:26:49.651380 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0127 12:26:49.651372 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0127 12:26:49.651461 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0127 12:26:49.651474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0127 12:26:49.651483 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0127 12:26:49.651487 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0127 12:26:49.651492 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0127 12:26:49.659790 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:31Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:24Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.378216 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.378269 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.378287 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.378307 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.378318 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.396271 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"206345a2-ea7e-4a32-8c93-414290ba5c92\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hg9l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-lrt6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.434706 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"79cc2fbf-b4e0-4598-bcfd-3c6a649b0aee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://373ece630dc9da05772f8b44c2c153d7605f48f691d7c40deb15658818ffc463\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://72d09522172a2d12c0f19069dfc65cf6d492b1c22faed0a6cee43041b76b99b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zxbkk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:27:04Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2tw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.466899 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.467072 4900 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.467125 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs podName:b6e155fd-bee9-4c32-9919-0dbee597003e nodeName:}" failed. No retries permitted until 2026-01-27 12:27:09.467111328 +0000 UTC m=+56.704139538 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs") pod "network-metrics-daemon-7gwzm" (UID: "b6e155fd-bee9-4c32-9919-0dbee597003e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.475901 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.483936 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.483953 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.483975 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.483986 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.483993 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.484006 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.484068 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.484074 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.484135 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.484181 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.484232 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.484258 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:07 crc kubenswrapper[4900]: E0127 12:27:07.484362 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.514114 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.554373 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.586708 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.586750 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.586761 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.586778 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.586791 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.593260 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-22mkn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d3d3b849-9dcd-43d2-a8e2-26a3f37ef978\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4f3dd4dc37398d34df5fd18b646a2c385ebe3c5ba0545c4e592182b248e98b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mqc4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-22mkn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.632715 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5s5j4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b6d0716b-f743-4acf-bf23-060e177011ee\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2lcvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5s5j4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.653776 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 23:17:23.403324234 +0000 UTC Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.674180 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5756a6fc-8d7c-45c7-934c-9a9bf452e9b6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e78718c5347036ebcc7ca0f1b05b47f2dfbff5227c83fac9c7dee20c039c65c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://236468db6329733ec60fbeb0ef57ec6394097d1d90c2ce041554d1f0c0e05b85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d0d4e52ceb899e2e715b769eaf9bbd5f7f3335f8cec106af20427a68f06d6cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23ddb4cbbb718a47767021238a8d00ed4855974cc9ee4d1f565bbff5491401b5\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://23ddb4cbbb718a47767021238a8d00ed4855974cc9ee4d1f565bbff5491401b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:26:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.690221 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.690264 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.690274 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.690290 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.690299 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.718139 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:07Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://258a3e6116407d233fa6f12c7191e2c409819516208c7d894d39b6dff15a34dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c367f1c091a4089199dab2cd57f0cfc0e168be0d9a53e6a985b45843aa8d13a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.755392 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:49Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.793585 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.793635 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.793647 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.793665 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.793681 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.797681 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:02Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1015d8309da830a3f85f3f718b03fa13110edc2c782c946d79ccad6e0aeb6d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.842149 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:27:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:52Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-27T12:27:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-27T12:27:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h7s6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:52
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-dqltj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.880188 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"848d1012-69c6-4e54-b1d9-457d3fbabb89\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-27T12:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0d062bfbe48c9d7697d04710ef4292203e0fb5717cee4bdb4a0bef32ed2b895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://749eeea46284f6dc8adb6ecf58ce21041e59ec4426c3299ed8ee02e10e9c7631\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"res
tartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd5a83e639b29e1a784600e90a8a301db2662bcd0dace29e28c19af7551c882f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-27T12:26:16Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.896188 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.896229 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.896240 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.896254 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:07 crc kubenswrapper[4900]: I0127 12:27:07.896271 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:07Z","lastTransitionTime":"2026-01-27T12:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:07.999991 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.000029 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.000041 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.000069 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.000080 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.102436 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.102481 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.102493 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.102508 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.102517 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.206618 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.206664 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.206676 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.206694 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.206707 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.292206 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"e00b8905baddcdf3d3f0f4f634d07d91c64b4de52e420e6218b86338c4db22d2"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.293967 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-twlmq" event={"ID":"02bfa799-f281-465d-ab6f-19ea9c16979c","Type":"ContainerStarted","Data":"5409c6fd120f00c9c06539ee7ad612a50874982417a1bf5f14d2867689d31c48"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.309402 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.309436 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.309445 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.309477 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.309493 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.362251 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-22mkn" podStartSLOduration=19.362211125 podStartE2EDuration="19.362211125s" podCreationTimestamp="2026-01-27 12:26:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:08.361971998 +0000 UTC m=+55.599000208" watchObservedRunningTime="2026-01-27 12:27:08.362211125 +0000 UTC m=+55.599239335" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.413094 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.413138 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.413148 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.413165 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.413187 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.421762 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=2.421736621 podStartE2EDuration="2.421736621s" podCreationTimestamp="2026-01-27 12:27:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:08.421486884 +0000 UTC m=+55.658515094" watchObservedRunningTime="2026-01-27 12:27:08.421736621 +0000 UTC m=+55.658764821" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.641249 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.641303 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.641315 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.641328 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.641339 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.654881 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 00:41:48.24233074 +0000 UTC Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.739911 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=16.739889644 podStartE2EDuration="16.739889644s" podCreationTimestamp="2026-01-27 12:26:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:08.723454389 +0000 UTC m=+55.960482609" watchObservedRunningTime="2026-01-27 12:27:08.739889644 +0000 UTC m=+55.976917854" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.744378 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.744410 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.744427 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.744451 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.744658 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.781789 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podStartSLOduration=18.781759349 podStartE2EDuration="18.781759349s" podCreationTimestamp="2026-01-27 12:26:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:08.781516152 +0000 UTC m=+56.018544362" watchObservedRunningTime="2026-01-27 12:27:08.781759349 +0000 UTC m=+56.018787559" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.848887 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.849286 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.849298 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.849317 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.849331 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.852083 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2tw27" podStartSLOduration=17.852033561 podStartE2EDuration="17.852033561s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:08.85132858 +0000 UTC m=+56.088356800" watchObservedRunningTime="2026-01-27 12:27:08.852033561 +0000 UTC m=+56.089061771" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.951892 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.951935 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.951944 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.951958 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:08 crc kubenswrapper[4900]: I0127 12:27:08.951969 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:08Z","lastTransitionTime":"2026-01-27T12:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.054500 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.054599 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.054611 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.054629 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.054641 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.158049 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.158132 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.158147 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.158193 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.158207 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.264018 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.264277 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.264304 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.264348 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.264368 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.367041 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.367099 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.367108 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.367127 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.367137 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.470082 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.470120 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.470129 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.470144 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.470155 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.481344 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.481408 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:09 crc kubenswrapper[4900]: E0127 12:27:09.481536 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.481535 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.481604 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:09 crc kubenswrapper[4900]: E0127 12:27:09.481743 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:09 crc kubenswrapper[4900]: E0127 12:27:09.481932 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:09 crc kubenswrapper[4900]: E0127 12:27:09.482002 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.544226 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:09 crc kubenswrapper[4900]: E0127 12:27:09.544490 4900 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:09 crc kubenswrapper[4900]: E0127 12:27:09.544599 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs podName:b6e155fd-bee9-4c32-9919-0dbee597003e nodeName:}" failed. No retries permitted until 2026-01-27 12:27:13.544572646 +0000 UTC m=+60.781600916 (durationBeforeRetry 4s). 
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.573269 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.573300 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.573309 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.573323 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.573334 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.655879 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 09:25:34.68170836 +0000 UTC
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.703291 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.703378 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.703396 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.703432 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.703455 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.805825 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.806186 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.806289 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.806379 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.806475 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.909371 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.909417 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.909430 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.909458 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:09 crc kubenswrapper[4900]: I0127 12:27:09.909524 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:09Z","lastTransitionTime":"2026-01-27T12:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.012192 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.012236 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.012245 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.012261 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.012271 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:10Z","lastTransitionTime":"2026-01-27T12:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.115220 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.115262 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.115275 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.115290 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.115302 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:10Z","lastTransitionTime":"2026-01-27T12:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.218013 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.218070 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.218083 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.218099 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.218110 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:10Z","lastTransitionTime":"2026-01-27T12:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.320290 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.320620 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.320632 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.320673 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.320699 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:10Z","lastTransitionTime":"2026-01-27T12:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.347133 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerStarted","Data":"b8a1d64a2e49e14b1a17319dc7de0e4536c7cadb7d799fb6f4c862af5ecd9e24"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.359113 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"9f25e3ed7531a11ded9fef3eb5ea317c80bbd3c8474509f4b2cf45c9702fae40"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.424361 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.424418 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.424428 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.424444 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.424460 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:10Z","lastTransitionTime":"2026-01-27T12:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.780943 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 09:01:46.895921475 +0000 UTC Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.781317 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-twlmq" podStartSLOduration=19.78128876 podStartE2EDuration="19.78128876s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:08.90759114 +0000 UTC m=+56.144619360" watchObservedRunningTime="2026-01-27 12:27:10.78128876 +0000 UTC m=+58.018316970" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.784017 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.784079 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.784092 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.784119 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.784130 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:10Z","lastTransitionTime":"2026-01-27T12:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.900688 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.900728 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.900764 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.900786 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:10 crc kubenswrapper[4900]: I0127 12:27:10.900802 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:10Z","lastTransitionTime":"2026-01-27T12:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.003535 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.003621 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.003633 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.003652 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.003664 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.182104 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.182149 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.182159 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.182173 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.182182 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.286350 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.286432 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.286449 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.286472 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.286485 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.365918 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-5s5j4" event={"ID":"b6d0716b-f743-4acf-bf23-060e177011ee","Type":"ContainerStarted","Data":"a9d6388e6d6fc0fd8abb7a68acd265f517667f6ee0400af455298dab785ec5b3"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.389817 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.389859 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.389870 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.389886 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.389899 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.481079 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.481103 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.481291 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.481312 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:11 crc kubenswrapper[4900]: E0127 12:27:11.481415 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:11 crc kubenswrapper[4900]: E0127 12:27:11.481514 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:11 crc kubenswrapper[4900]: E0127 12:27:11.481608 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:11 crc kubenswrapper[4900]: E0127 12:27:11.481676 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.481695 4900 scope.go:117] "RemoveContainer" containerID="1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.492660 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.492701 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.492711 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.492725 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.492735 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.597389 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.597432 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.597446 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.597463 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.597474 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.699962 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.699998 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.700009 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.700022 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.700032 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.842623 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 11:32:59.791341419 +0000 UTC Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.845204 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.845232 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.845242 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.845256 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.845266 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.948612 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.948882 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.948890 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.948903 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:11 crc kubenswrapper[4900]: I0127 12:27:11.948912 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:11Z","lastTransitionTime":"2026-01-27T12:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.053958 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.053996 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.054005 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.054022 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.054034 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.157019 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.157087 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.157102 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.157121 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.157139 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.259301 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.259336 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.259344 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.259357 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.259365 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.361814 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.361868 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.361887 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.361912 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.361931 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.376824 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerStarted","Data":"02f786492a42aa6d248f0e4d2bb3278a1e4e49f6bf493122e3c4652f7cb939ae"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.377294 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.379387 4900 generic.go:334] "Generic (PLEG): container finished" podID="206345a2-ea7e-4a32-8c93-414290ba5c92" containerID="b8a1d64a2e49e14b1a17319dc7de0e4536c7cadb7d799fb6f4c862af5ecd9e24" exitCode=0 Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.379468 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerDied","Data":"b8a1d64a2e49e14b1a17319dc7de0e4536c7cadb7d799fb6f4c862af5ecd9e24"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.382895 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.384376 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"355e30f76b375b00057638f14bf0b41adf853c25aba5fc64ed7879e242b936c3"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.385095 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.422403 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.432861 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podStartSLOduration=21.432834227 podStartE2EDuration="21.432834227s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:12.432352142 +0000 UTC m=+59.669380362" watchObservedRunningTime="2026-01-27 12:27:12.432834227 +0000 UTC m=+59.669862427" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.433322 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-5s5j4" podStartSLOduration=22.433315881 podStartE2EDuration="22.433315881s" podCreationTimestamp="2026-01-27 12:26:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:11.462975484 +0000 UTC m=+58.700003714" watchObservedRunningTime="2026-01-27 12:27:12.433315881 +0000 UTC m=+59.670344081" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.465788 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.465832 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.465850 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.465870 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.465888 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.518724 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=22.518687529 podStartE2EDuration="22.518687529s" podCreationTimestamp="2026-01-27 12:26:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:12.517692639 +0000 UTC m=+59.754720849" watchObservedRunningTime="2026-01-27 12:27:12.518687529 +0000 UTC m=+59.755715739" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.569200 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.569247 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.569258 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.569282 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.569294 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.672043 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.672082 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.672091 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.672104 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.672113 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.774918 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.774950 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.774960 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.774975 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.774992 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.842865 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 00:47:02.103299203 +0000 UTC Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.950009 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.950077 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.950088 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.950102 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:12 crc kubenswrapper[4900]: I0127 12:27:12.950112 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:12Z","lastTransitionTime":"2026-01-27T12:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.052871 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.052910 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.052918 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.052935 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.052946 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.085995 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.211023 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.211086 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.211099 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.211116 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.211128 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.234502 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.313335 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.313375 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.313389 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.313410 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.313425 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.393820 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerStarted","Data":"1a4620666d7a3853f40d9c4659560df153c7cf001bef38b90d52cad4e5b358fb"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.394401 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.421535 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.421562 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.421573 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.421587 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.421596 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.481334 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.481392 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.481411 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:13 crc kubenswrapper[4900]: E0127 12:27:13.481540 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.481694 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:13 crc kubenswrapper[4900]: E0127 12:27:13.481808 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:13 crc kubenswrapper[4900]: E0127 12:27:13.481710 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:13 crc kubenswrapper[4900]: E0127 12:27:13.481978 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.523705 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.523738 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.523748 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.523763 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.523773 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.620607 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:13 crc kubenswrapper[4900]: E0127 12:27:13.620825 4900 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:13 crc kubenswrapper[4900]: E0127 12:27:13.620889 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs podName:b6e155fd-bee9-4c32-9919-0dbee597003e nodeName:}" failed. No retries permitted until 2026-01-27 12:27:21.620870734 +0000 UTC m=+68.857898944 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs") pod "network-metrics-daemon-7gwzm" (UID: "b6e155fd-bee9-4c32-9919-0dbee597003e") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.625965 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.626015 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.626028 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.626043 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.626105 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.734155 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.734195 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.734204 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.734225 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.734241 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
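The nestedpendingoperations entry above defers the failed metrics-certs mount with "durationBeforeRetry 8s". A hedged sketch of the retry policy that figure suggests: exponential backoff doubling per failure up to a cap. The exact constants here (500ms base, factor 2, roughly 2m cap) are assumptions modeled on upstream kubelet volume-manager defaults, not values read from this log:

    // Hypothetical backoff model; failure 5 yields 8s, matching the entry above.
    package main

    import (
        "fmt"
        "time"
    )

    func durationBeforeRetry(failures int) time.Duration {
        const (
            base     = 500 * time.Millisecond // assumed initial delay
            factor   = 2                      // assumed growth factor
            maxDelay = 2*time.Minute + 2*time.Second
        )
        d := base
        for i := 1; i < failures; i++ {
            d *= factor
            if d >= maxDelay {
                return maxDelay
            }
        }
        return d
    }

    func main() {
        for f := 1; f <= 6; f++ {
            fmt.Printf("failure %d -> wait %s\n", f, durationBeforeRetry(f))
        }
    }

Under this model the 8s delay would indicate the fifth consecutive failure of the same mount operation, which is consistent with the secret ("metrics-daemon-secret" not registered) having been unavailable since kubelet start.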
Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.837278 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.837335 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.837349 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.837373 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.837388 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.843410 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 13:21:00.370727929 +0000 UTC Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.939680 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.939723 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.939739 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.939763 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:13 crc kubenswrapper[4900]: I0127 12:27:13.939779 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:13Z","lastTransitionTime":"2026-01-27T12:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.111270 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.111315 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.111325 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.111341 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.111352 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:14Z","lastTransitionTime":"2026-01-27T12:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.213462 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.213507 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.213519 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.213535 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.213548 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:14Z","lastTransitionTime":"2026-01-27T12:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.315623 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.315666 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.315676 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.315691 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.315701 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:14Z","lastTransitionTime":"2026-01-27T12:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.494487 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.494515 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.494523 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.494536 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.494545 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:14Z","lastTransitionTime":"2026-01-27T12:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.597336 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.597370 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.597381 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.597396 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.597406 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:14Z","lastTransitionTime":"2026-01-27T12:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.700095 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.700141 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.700151 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.700166 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.700175 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:14Z","lastTransitionTime":"2026-01-27T12:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.803181 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.803219 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.803229 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.803243 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.803254 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:14Z","lastTransitionTime":"2026-01-27T12:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.844352 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 14:38:34.857603702 +0000 UTC Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.906155 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.906193 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.906201 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.906218 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:14 crc kubenswrapper[4900]: I0127 12:27:14.906228 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:14Z","lastTransitionTime":"2026-01-27T12:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.009224 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.009270 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.009296 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.009313 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.009324 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.112100 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.112157 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.112171 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.112188 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.112198 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.215169 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.215229 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.215241 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.215265 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.215279 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.320708 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.320760 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.320770 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.320785 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.320795 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.425290 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.425343 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.425354 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.425371 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.425387 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.481791 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.481843 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.482047 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:15 crc kubenswrapper[4900]: E0127 12:27:15.482238 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.482296 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:15 crc kubenswrapper[4900]: E0127 12:27:15.482563 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:15 crc kubenswrapper[4900]: E0127 12:27:15.482643 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:15 crc kubenswrapper[4900]: E0127 12:27:15.482714 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.532449 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.532498 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.532509 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.532527 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.532541 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.635892 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.635929 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.635941 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.635958 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.635971 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.739848 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.739894 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.739905 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.739923 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.739936 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.843257 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.843303 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.843311 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.843328 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.843341 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.845422 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 21:19:10.179917998 +0000 UTC Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.946419 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.946480 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.946491 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.946512 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:15 crc kubenswrapper[4900]: I0127 12:27:15.946525 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:15Z","lastTransitionTime":"2026-01-27T12:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.050496 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.050900 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.050915 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.050938 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.050951 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.154305 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.154611 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.154636 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.154653 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.154668 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.257750 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.257787 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.257799 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.257816 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.257828 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.361334 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.361394 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.361406 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.361426 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.361440 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.465206 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.465272 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.465285 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.465301 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.465313 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.500203 4900 generic.go:334] "Generic (PLEG): container finished" podID="206345a2-ea7e-4a32-8c93-414290ba5c92" containerID="1a4620666d7a3853f40d9c4659560df153c7cf001bef38b90d52cad4e5b358fb" exitCode=0 Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.500295 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerDied","Data":"1a4620666d7a3853f40d9c4659560df153c7cf001bef38b90d52cad4e5b358fb"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.567911 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.567961 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.568084 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.568138 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.568150 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.669661 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.669688 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.669698 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.669714 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.669723 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.774728 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.774988 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.775000 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.775022 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.775036 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.846317 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 23:48:30.291431391 +0000 UTC Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.878681 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.878731 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.878740 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.878758 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.878768 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.981646 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.981687 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.981698 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.981713 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:16 crc kubenswrapper[4900]: I0127 12:27:16.981723 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:16Z","lastTransitionTime":"2026-01-27T12:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.084701 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.084997 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.085113 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.085221 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.085333 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:17Z","lastTransitionTime":"2026-01-27T12:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.215307 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.215354 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.215367 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.215383 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.215396 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:17Z","lastTransitionTime":"2026-01-27T12:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.228338 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.228571 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.228673 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.228770 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.228915 4900 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-27T12:27:17Z","lastTransitionTime":"2026-01-27T12:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.460625 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452"] Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.461033 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.462921 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.462967 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.463076 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.463299 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.480921 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.480973 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.481012 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.481106 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:17 crc kubenswrapper[4900]: E0127 12:27:17.481286 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:17 crc kubenswrapper[4900]: E0127 12:27:17.481414 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:17 crc kubenswrapper[4900]: E0127 12:27:17.481557 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:17 crc kubenswrapper[4900]: E0127 12:27:17.481679 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.506342 4900 generic.go:334] "Generic (PLEG): container finished" podID="206345a2-ea7e-4a32-8c93-414290ba5c92" containerID="3d69c3babfbc4c15c93d1ec5895dedb6d5eaac6b05c216e6cf16e30c2c2d460a" exitCode=0 Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.506398 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerDied","Data":"3d69c3babfbc4c15c93d1ec5895dedb6d5eaac6b05c216e6cf16e30c2c2d460a"} Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.621339 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c362bf8d-619f-4469-8e83-eb0a66a31633-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.621395 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/c362bf8d-619f-4469-8e83-eb0a66a31633-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.621435 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c362bf8d-619f-4469-8e83-eb0a66a31633-service-ca\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.621458 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/c362bf8d-619f-4469-8e83-eb0a66a31633-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.621517 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c362bf8d-619f-4469-8e83-eb0a66a31633-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.722111 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c362bf8d-619f-4469-8e83-eb0a66a31633-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.722474 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c362bf8d-619f-4469-8e83-eb0a66a31633-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.722498 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/c362bf8d-619f-4469-8e83-eb0a66a31633-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.722545 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c362bf8d-619f-4469-8e83-eb0a66a31633-service-ca\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.722599 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/c362bf8d-619f-4469-8e83-eb0a66a31633-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.722676 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/c362bf8d-619f-4469-8e83-eb0a66a31633-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.723880 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/c362bf8d-619f-4469-8e83-eb0a66a31633-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.724205 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c362bf8d-619f-4469-8e83-eb0a66a31633-service-ca\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.737901 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c362bf8d-619f-4469-8e83-eb0a66a31633-serving-cert\") pod 
\"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.742554 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c362bf8d-619f-4469-8e83-eb0a66a31633-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-wm452\" (UID: \"c362bf8d-619f-4469-8e83-eb0a66a31633\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.823479 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" Jan 27 12:27:17 crc kubenswrapper[4900]: W0127 12:27:17.836019 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc362bf8d_619f_4469_8e83_eb0a66a31633.slice/crio-a7d42c3c5218500bd23fef75b53aed8195159ba820136859b03a7e605188d589 WatchSource:0}: Error finding container a7d42c3c5218500bd23fef75b53aed8195159ba820136859b03a7e605188d589: Status 404 returned error can't find the container with id a7d42c3c5218500bd23fef75b53aed8195159ba820136859b03a7e605188d589 Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.847038 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 23:16:37.595058418 +0000 UTC Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.847133 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 27 12:27:17 crc kubenswrapper[4900]: I0127 12:27:17.856758 4900 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 27 12:27:18 crc kubenswrapper[4900]: I0127 12:27:18.043334 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-7gwzm"] Jan 27 12:27:18 crc kubenswrapper[4900]: I0127 12:27:18.043965 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:18 crc kubenswrapper[4900]: E0127 12:27:18.044255 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:18 crc kubenswrapper[4900]: I0127 12:27:18.511541 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" event={"ID":"c362bf8d-619f-4469-8e83-eb0a66a31633","Type":"ContainerStarted","Data":"d6ba44e83735168fa534731d455e2445a1b7b541f012f5f43dfe364ca0fcbed5"} Jan 27 12:27:18 crc kubenswrapper[4900]: I0127 12:27:18.511593 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" event={"ID":"c362bf8d-619f-4469-8e83-eb0a66a31633","Type":"ContainerStarted","Data":"a7d42c3c5218500bd23fef75b53aed8195159ba820136859b03a7e605188d589"} Jan 27 12:27:18 crc kubenswrapper[4900]: I0127 12:27:18.520293 4900 generic.go:334] "Generic (PLEG): container finished" podID="206345a2-ea7e-4a32-8c93-414290ba5c92" containerID="e87d890d4ab407f1025c2d3a9fa4f8e4923fd3ad306efc15c699b0e8cee5a30c" exitCode=0 Jan 27 12:27:18 crc kubenswrapper[4900]: I0127 12:27:18.520366 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerDied","Data":"e87d890d4ab407f1025c2d3a9fa4f8e4923fd3ad306efc15c699b0e8cee5a30c"} Jan 27 12:27:18 crc kubenswrapper[4900]: I0127 12:27:18.590885 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wm452" podStartSLOduration=27.590852892 podStartE2EDuration="27.590852892s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:18.56504852 +0000 UTC m=+65.802076730" watchObservedRunningTime="2026-01-27 12:27:18.590852892 +0000 UTC m=+65.827881102" Jan 27 12:27:19 crc kubenswrapper[4900]: I0127 12:27:19.481407 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:19 crc kubenswrapper[4900]: I0127 12:27:19.481470 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:19 crc kubenswrapper[4900]: E0127 12:27:19.481770 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:19 crc kubenswrapper[4900]: I0127 12:27:19.481550 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:19 crc kubenswrapper[4900]: E0127 12:27:19.481933 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:19 crc kubenswrapper[4900]: I0127 12:27:19.481539 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:19 crc kubenswrapper[4900]: E0127 12:27:19.482108 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:19 crc kubenswrapper[4900]: E0127 12:27:19.482179 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:20 crc kubenswrapper[4900]: I0127 12:27:20.531408 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerStarted","Data":"46b3f736b7277acf5ecd2088cbfad6ae734cbfcf9d4632447ccb3d3548208fc5"} Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.215982 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.481172 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.481337 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.481442 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.481448 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.481707 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.481656 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.481918 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7gwzm" podUID="b6e155fd-bee9-4c32-9919-0dbee597003e" Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.481995 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.514491 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.514698 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:53.514667337 +0000 UTC m=+100.751695567 (durationBeforeRetry 32s). 
Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.514829 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.514907 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.515122 4900 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.515244 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:53.515216373 +0000 UTC m=+100.752244613 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.515689 4900 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.515800 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:53.515780849 +0000 UTC m=+100.752809089 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
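The configmap.go/secret.go failures here are not API-server errors: the kubelet resolves secrets and configmaps for volume mounts through per-pod caches, and until a pod's namespace objects have been registered there, lookups fail with "not registered" and the mount is requeued (hence the retry records). A toy model of such a gated lookup, with invented types purely for illustration:

```go
package main

import (
	"fmt"
	"sync"
)

// objectCache is a toy stand-in for the kubelet's per-pod secret and
// configmap caches: lookups fail until the object has been registered
// by the pod-admission path, mirroring the "not registered" errors.
type objectCache struct {
	mu   sync.RWMutex
	data map[string][]byte // key: "namespace/name"
}

func (c *objectCache) Get(namespace, name string) ([]byte, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.data[namespace+"/"+name]
	if !ok {
		return nil, fmt.Errorf("object %q/%q not registered", namespace, name)
	}
	return v, nil
}

func main() {
	c := &objectCache{data: map[string][]byte{}}
	if _, err := c.Get("openshift-network-console", "networking-console-plugin"); err != nil {
		fmt.Println(err) // object "openshift-network-console"/"networking-console-plugin" not registered
	}
}
```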
Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.716424 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm"
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.716620 4900 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.716692 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs podName:b6e155fd-bee9-4c32-9919-0dbee597003e nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.716676137 +0000 UTC m=+84.953704347 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs") pod "network-metrics-daemon-7gwzm" (UID: "b6e155fd-bee9-4c32-9919-0dbee597003e") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.817160 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 12:27:21 crc kubenswrapper[4900]: I0127 12:27:21.817222 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.817429 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.817441 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.817483 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.817497 4900 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.817565 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:53.817542983 +0000 UTC m=+101.054571243 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.817463 4900 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.817612 4900 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:27:21 crc kubenswrapper[4900]: E0127 12:27:21.817691 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-27 12:27:53.817668136 +0000 UTC m=+101.054696366 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.475494 4900 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.475683 4900 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.515936 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vrhjw"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.516853 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.517420 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wndtt"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.517876 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.518019 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-n8cfh"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.518449 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.519083 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.519148 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524688 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-config\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524752 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-audit-dir\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524821 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-dir\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524861 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524895 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524926 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524950 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-image-import-ca\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524975 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-audit-policies\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.524998 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgmzb\" (UniqueName: \"kubernetes.io/projected/2b4fdd15-bb83-4db3-bf15-2101476b4000-kube-api-access-cgmzb\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525024 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f693c83e-d25c-4f46-bbb6-fd38195cde95-serving-cert\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525046 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-serving-cert\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525088 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/511728ec-f571-4333-b0e2-c6a897c6c2d5-node-pullsecrets\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525121 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-config\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525144 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-policies\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525167 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525217 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525245 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-etcd-client\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525270 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525295 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-client-ca\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525318 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q79z5\" (UniqueName: \"kubernetes.io/projected/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-kube-api-access-q79z5\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525346 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgxn4\" (UniqueName: \"kubernetes.io/projected/511728ec-f571-4333-b0e2-c6a897c6c2d5-kube-api-access-hgxn4\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525407 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525432 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525470 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-encryption-config\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525492 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525511 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-audit\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525533 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525582 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-serving-cert\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525605 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx6r9\" (UniqueName: \"kubernetes.io/projected/f693c83e-d25c-4f46-bbb6-fd38195cde95-kube-api-access-fx6r9\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525629 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525651 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525719 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525741 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525773 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-etcd-serving-ca\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525805 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525828 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/511728ec-f571-4333-b0e2-c6a897c6c2d5-audit-dir\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525852 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-etcd-client\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.525879 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-encryption-config\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.531178 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.531801 4900 util.go:30] "No sandbox for pod can be found. 
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.545675 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.563072 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wrmdd"]
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.563886 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.564450 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.565802 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.566114 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.566710 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.569239 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.572778 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.572978 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.573531 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.573567 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574192 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574350 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574473 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574574 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574654 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574731 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574748 4900 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574788 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574884 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.574915 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575045 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575115 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575130 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575251 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575393 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575509 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575533 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575653 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575700 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575776 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575824 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575925 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575959 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.575977 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576065 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576174 4900 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver"/"encryption-config-1" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576198 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576291 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576337 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576443 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576572 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576648 4900 generic.go:334] "Generic (PLEG): container finished" podID="206345a2-ea7e-4a32-8c93-414290ba5c92" containerID="46b3f736b7277acf5ecd2088cbfad6ae734cbfcf9d4632447ccb3d3548208fc5" exitCode=0 Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576737 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerDied","Data":"46b3f736b7277acf5ecd2088cbfad6ae734cbfcf9d4632447ccb3d3548208fc5"} Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576771 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576837 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.576956 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.577046 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.577208 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.577346 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.577639 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-5zbpj"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.578141 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.579933 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.580513 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.580989 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.581376 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.581480 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.583699 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rwdmj"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.584253 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.584635 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.584833 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-st975"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.585371 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.587225 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.588679 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.591318 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-zbp4l"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.591947 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-zbp4l" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.593287 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.595533 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.598187 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-s7kkg"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.612813 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x96hr"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.617696 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.630894 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.630960 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.631466 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.631588 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644371 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-client-ca\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644433 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/55206b88-053e-4616-bfb0-82f5d8a2d4f9-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644467 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c068a00-4049-443f-b5a6-deec4c086d13-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644492 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644514 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-audit\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644536 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644571 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"encryption-config\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-encryption-config\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644595 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-serving-cert\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644615 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-service-ca\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644650 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644675 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx6r9\" (UniqueName: \"kubernetes.io/projected/f693c83e-d25c-4f46-bbb6-fd38195cde95-kube-api-access-fx6r9\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644698 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644719 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644744 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcq7s\" (UniqueName: \"kubernetes.io/projected/2c068a00-4049-443f-b5a6-deec4c086d13-kube-api-access-bcq7s\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644772 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-csvht\" (UID: 
\"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644799 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-etcd-serving-ca\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644837 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644862 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-oauth-config\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.644905 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-console-config\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.645164 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-zvswh"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.645561 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.645952 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.646361 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.646558 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654574 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm425\" (UniqueName: \"kubernetes.io/projected/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-kube-api-access-sm425\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654680 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/511728ec-f571-4333-b0e2-c6a897c6c2d5-audit-dir\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654720 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-service-ca-bundle\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654796 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/511728ec-f571-4333-b0e2-c6a897c6c2d5-audit-dir\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654845 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-encryption-config\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654865 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-serving-cert\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654883 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drr87\" (UniqueName: \"kubernetes.io/projected/6893cb7a-209f-4822-9e82-34ad39c7647f-kube-api-access-drr87\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654947 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-etcd-client\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654971 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-bvgvk\" (UniqueName: \"kubernetes.io/projected/55206b88-053e-4616-bfb0-82f5d8a2d4f9-kube-api-access-bvgvk\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.654988 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-trusted-ca-bundle\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.656365 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.656524 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fw6lf"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.656596 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.656702 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.656781 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.658452 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-encryption-config\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.658642 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d6c295a-54fe-47c5-b587-940db5e2589b-config\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.662624 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-encryption-config\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.662997 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.663728 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.663715 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.660422 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.661095 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.659881 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-etcd-serving-ca\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.661373 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.661433 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.661830 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.662154 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.662240 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.663703 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.663961 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.663989 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.664042 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.664399 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.667895 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-audit\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" 
Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.667997 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.668249 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.668668 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.668713 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.668747 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.668874 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.669270 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.669970 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.672276 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-config\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.672441 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-config\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.672550 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlwj8\" (UniqueName: \"kubernetes.io/projected/9080a9b0-9613-4077-bbbc-6ff558b4180c-kube-api-access-rlwj8\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.672824 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k2ks\" (UniqueName: \"kubernetes.io/projected/e4c1d903-be2e-4216-bc7a-e17c23bd6e63-kube-api-access-9k2ks\") pod \"dns-operator-744455d44c-rwdmj\" (UID: \"e4c1d903-be2e-4216-bc7a-e17c23bd6e63\") " pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" Jan 27 
12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.672943 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-audit-dir\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673082 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-dir\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673186 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/2c068a00-4049-443f-b5a6-deec4c086d13-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673335 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/55206b88-053e-4616-bfb0-82f5d8a2d4f9-images\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673437 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673543 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673639 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-config\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673764 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-image-import-ca\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673866 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673968 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgmzb\" (UniqueName: \"kubernetes.io/projected/2b4fdd15-bb83-4db3-bf15-2101476b4000-kube-api-access-cgmzb\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674080 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9080a9b0-9613-4077-bbbc-6ff558b4180c-serving-cert\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674148 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-config\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.671161 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674277 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/7d6c295a-54fe-47c5-b587-940db5e2589b-machine-approver-tls\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674425 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55206b88-053e-4616-bfb0-82f5d8a2d4f9-config\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674525 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-audit-policies\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674736 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f693c83e-d25c-4f46-bbb6-fd38195cde95-serving-cert\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674842 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/511728ec-f571-4333-b0e2-c6a897c6c2d5-node-pullsecrets\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674964 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-serving-cert\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.675319 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.675174 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.675462 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-config\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.675606 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/511728ec-f571-4333-b0e2-c6a897c6c2d5-node-pullsecrets\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674299 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.675582 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-image-import-ca\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.673773 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-audit-dir\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.671310 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.675726 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-audit-policies\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674330 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-dir\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.676136 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.674454 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.675050 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.676328 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7d6c295a-54fe-47c5-b587-940db5e2589b-auth-proxy-config\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.676439 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.676555 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-policies\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.676654 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.676746 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-p297m"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.677312 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.677965 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.676752 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69926\" (UniqueName: \"kubernetes.io/projected/75d8fa11-eb06-4aae-8e96-3bb4328d69d7-kube-api-access-69926\") pod \"downloads-7954f5f757-zbp4l\" (UID: \"75d8fa11-eb06-4aae-8e96-3bb4328d69d7\") " pod="openshift-console/downloads-7954f5f757-zbp4l" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678412 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678456 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e4c1d903-be2e-4216-bc7a-e17c23bd6e63-metrics-tls\") pod \"dns-operator-744455d44c-rwdmj\" (UID: \"e4c1d903-be2e-4216-bc7a-e17c23bd6e63\") " pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678484 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-etcd-client\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678508 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-client-ca\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678527 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q79z5\" (UniqueName: \"kubernetes.io/projected/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-kube-api-access-q79z5\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678547 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-ggw97\" (UniqueName: \"kubernetes.io/projected/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-kube-api-access-ggw97\") pod \"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678571 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2c068a00-4049-443f-b5a6-deec4c086d13-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678599 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgxn4\" (UniqueName: \"kubernetes.io/projected/511728ec-f571-4333-b0e2-c6a897c6c2d5-kube-api-access-hgxn4\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678622 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6822\" (UniqueName: \"kubernetes.io/projected/7d6c295a-54fe-47c5-b587-940db5e2589b-kube-api-access-h6822\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678641 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-serving-cert\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678677 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-config\") pod \"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678715 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678773 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678800 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-oauth-serving-cert\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678827 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.679119 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.679707 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-config\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.680807 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.682028 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.672646 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.683942 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.686027 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-7fkdm"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.686133 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-serving-cert\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.688128 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-client-ca\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.689162 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-policies\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.675541 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.691671 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.677864 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.691804 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/511728ec-f571-4333-b0e2-c6a897c6c2d5-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.691822 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.677981 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678042 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678132 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.678268 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.682608 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.684865 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.689988 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.690191 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.690379 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.690650 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.690947 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 
12:27:22.692242 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.695512 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.695667 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.695859 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.697324 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.697585 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.697821 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.698709 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.718869 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.719982 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/511728ec-f571-4333-b0e2-c6a897c6c2d5-etcd-client\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.720249 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.720757 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.721101 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.721341 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f693c83e-d25c-4f46-bbb6-fd38195cde95-serving-cert\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.721750 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.721847 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.721991 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.722857 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.722842 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.725566 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-etcd-client\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.725690 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cr4gz"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.727306 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.728778 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-serving-cert\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.730102 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.732128 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.735829 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8kcp8"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.738273 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.740901 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.744191 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.744999 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.745135 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.745998 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.746262 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.762158 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.764232 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.764661 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.770271 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.772033 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.772988 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.779791 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fc6f\" (UniqueName: \"kubernetes.io/projected/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-kube-api-access-6fc6f\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.779837 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ef778856-8cbe-4b13-90f6-74bd28af2c86-available-featuregates\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.779886 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggw97\" (UniqueName: \"kubernetes.io/projected/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-kube-api-access-ggw97\") pod \"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.779913 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6822\" (UniqueName: \"kubernetes.io/projected/7d6c295a-54fe-47c5-b587-940db5e2589b-kube-api-access-h6822\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.779943 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b9b210a-0ed1-438e-af3b-14c5db643e15-trusted-ca\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.779969 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/557eda06-2394-4c8c-82c1-dbc08a122232-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.779993 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-profile-collector-cert\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780021 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-config\") pod 
\"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780085 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c068a00-4049-443f-b5a6-deec4c086d13-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780110 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef778856-8cbe-4b13-90f6-74bd28af2c86-serving-cert\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780149 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-service-ca\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780174 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-proxy-tls\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780697 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-config\") pod \"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780753 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krjvt\" (UniqueName: \"kubernetes.io/projected/6561ed20-6634-4df7-891e-3c7f3e9427b0-kube-api-access-krjvt\") pod \"multus-admission-controller-857f4d67dd-7fkdm\" (UID: \"6561ed20-6634-4df7-891e-3c7f3e9427b0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780793 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-metrics-certs\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780817 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcq7s\" (UniqueName: \"kubernetes.io/projected/2c068a00-4049-443f-b5a6-deec4c086d13-kube-api-access-bcq7s\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: 
\"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780837 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b9b210a-0ed1-438e-af3b-14c5db643e15-metrics-tls\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780865 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bddbs\" (UniqueName: \"kubernetes.io/projected/8b9b210a-0ed1-438e-af3b-14c5db643e15-kube-api-access-bddbs\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780938 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9879637-6084-4966-9f47-2b99f22ea469-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-2g4jf\" (UID: \"d9879637-6084-4966-9f47-2b99f22ea469\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780965 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-service-ca\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.780988 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-oauth-config\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781014 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rwsl\" (UniqueName: \"kubernetes.io/projected/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-kube-api-access-7rwsl\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781309 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-service-ca\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781392 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-default-certificate\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 
12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781423 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6561ed20-6634-4df7-891e-3c7f3e9427b0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-7fkdm\" (UID: \"6561ed20-6634-4df7-891e-3c7f3e9427b0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781454 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-trusted-ca-bundle\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781508 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sf77\" (UniqueName: \"kubernetes.io/projected/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-kube-api-access-5sf77\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781541 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d6c295a-54fe-47c5-b587-940db5e2589b-config\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781582 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781612 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0703acf1-af71-4249-b1ef-e19c6beb4d86-service-ca-bundle\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781635 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-config\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781668 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-ca\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781684 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-k8nbg\" (UniqueName: \"kubernetes.io/projected/ef778856-8cbe-4b13-90f6-74bd28af2c86-kube-api-access-k8nbg\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781708 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/2c068a00-4049-443f-b5a6-deec4c086d13-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781724 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvjm6\" (UniqueName: \"kubernetes.io/projected/d9879637-6084-4966-9f47-2b99f22ea469-kube-api-access-wvjm6\") pod \"cluster-samples-operator-665b6dd947-2g4jf\" (UID: \"d9879637-6084-4966-9f47-2b99f22ea469\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781855 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/55206b88-053e-4616-bfb0-82f5d8a2d4f9-images\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781874 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/7d6c295a-54fe-47c5-b587-940db5e2589b-machine-approver-tls\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781892 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9080a9b0-9613-4077-bbbc-6ff558b4180c-serving-cert\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781908 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/557eda06-2394-4c8c-82c1-dbc08a122232-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781925 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.781945 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f4d2ff4d-7d50-461f-8ea3-57fdd1be7214-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnzkd\" (UID: \"f4d2ff4d-7d50-461f-8ea3-57fdd1be7214\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782014 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg79h\" (UniqueName: \"kubernetes.io/projected/08f894ed-4dd9-4e81-8051-4a1024f93a0b-kube-api-access-hg79h\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782033 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e4c1d903-be2e-4216-bc7a-e17c23bd6e63-metrics-tls\") pod \"dns-operator-744455d44c-rwdmj\" (UID: \"e4c1d903-be2e-4216-bc7a-e17c23bd6e63\") " pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782068 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-serving-cert\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782090 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2c068a00-4049-443f-b5a6-deec4c086d13-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782110 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-serving-cert\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782128 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/08f894ed-4dd9-4e81-8051-4a1024f93a0b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782143 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tml9z\" (UniqueName: \"kubernetes.io/projected/f4d2ff4d-7d50-461f-8ea3-57fdd1be7214-kube-api-access-tml9z\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnzkd\" (UID: \"f4d2ff4d-7d50-461f-8ea3-57fdd1be7214\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" Jan 27 12:27:22 
crc kubenswrapper[4900]: I0127 12:27:22.782159 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/08f894ed-4dd9-4e81-8051-4a1024f93a0b-images\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782183 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-oauth-serving-cert\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782200 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782217 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-client-ca\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782236 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/55206b88-053e-4616-bfb0-82f5d8a2d4f9-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782254 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3973a7c-f509-4769-93a6-4f71f99cc515-trusted-ca\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782283 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3973a7c-f509-4769-93a6-4f71f99cc515-serving-cert\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782279 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c068a00-4049-443f-b5a6-deec4c086d13-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782326 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"console-config\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-console-config\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782343 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm425\" (UniqueName: \"kubernetes.io/projected/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-kube-api-access-sm425\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782361 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3973a7c-f509-4769-93a6-4f71f99cc515-config\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782378 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-service-ca-bundle\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782412 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-config\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782431 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drr87\" (UniqueName: \"kubernetes.io/projected/6893cb7a-209f-4822-9e82-34ad39c7647f-kube-api-access-drr87\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782449 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-serving-cert\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782659 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvgvk\" (UniqueName: \"kubernetes.io/projected/55206b88-053e-4616-bfb0-82f5d8a2d4f9-kube-api-access-bvgvk\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782690 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/08f894ed-4dd9-4e81-8051-4a1024f93a0b-proxy-tls\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") 
" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782708 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlwj8\" (UniqueName: \"kubernetes.io/projected/9080a9b0-9613-4077-bbbc-6ff558b4180c-kube-api-access-rlwj8\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782724 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k2ks\" (UniqueName: \"kubernetes.io/projected/e4c1d903-be2e-4216-bc7a-e17c23bd6e63-kube-api-access-9k2ks\") pod \"dns-operator-744455d44c-rwdmj\" (UID: \"e4c1d903-be2e-4216-bc7a-e17c23bd6e63\") " pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782735 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-trusted-ca-bundle\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782741 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782782 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8b9b210a-0ed1-438e-af3b-14c5db643e15-bound-sa-token\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782813 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782837 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-srv-cert\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782859 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9tvq\" (UniqueName: \"kubernetes.io/projected/0c526262-6ee4-4526-91d7-614b3cd91082-kube-api-access-p9tvq\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782890 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-config\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782923 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-client\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782952 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-stats-auth\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.782981 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55206b88-053e-4616-bfb0-82f5d8a2d4f9-config\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.783006 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g7cq\" (UniqueName: \"kubernetes.io/projected/0703acf1-af71-4249-b1ef-e19c6beb4d86-kube-api-access-7g7cq\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.783038 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh4ss\" (UniqueName: \"kubernetes.io/projected/a3973a7c-f509-4769-93a6-4f71f99cc515-kube-api-access-sh4ss\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.785808 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-client-ca\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.787316 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7d6c295a-54fe-47c5-b587-940db5e2589b-auth-proxy-config\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.787378 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-69926\" (UniqueName: \"kubernetes.io/projected/75d8fa11-eb06-4aae-8e96-3bb4328d69d7-kube-api-access-69926\") pod \"downloads-7954f5f757-zbp4l\" (UID: \"75d8fa11-eb06-4aae-8e96-3bb4328d69d7\") " pod="openshift-console/downloads-7954f5f757-zbp4l" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.787406 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-566sl\" (UniqueName: \"kubernetes.io/projected/557eda06-2394-4c8c-82c1-dbc08a122232-kube-api-access-566sl\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.787488 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.785872 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-oauth-serving-cert\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.787917 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.788735 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.789079 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7d6c295a-54fe-47c5-b587-940db5e2589b-auth-proxy-config\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.789204 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/55206b88-053e-4616-bfb0-82f5d8a2d4f9-images\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.788848 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-config\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.789556 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.790480 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d6c295a-54fe-47c5-b587-940db5e2589b-config\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.791534 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-oauth-config\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.791602 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-serving-cert\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.793320 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-lld9r"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.793992 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.794093 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e4c1d903-be2e-4216-bc7a-e17c23bd6e63-metrics-tls\") pod \"dns-operator-744455d44c-rwdmj\" (UID: \"e4c1d903-be2e-4216-bc7a-e17c23bd6e63\") " pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.794970 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx6r9\" (UniqueName: \"kubernetes.io/projected/f693c83e-d25c-4f46-bbb6-fd38195cde95-kube-api-access-fx6r9\") pod \"controller-manager-879f6c89f-n8cfh\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.795409 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55206b88-053e-4616-bfb0-82f5d8a2d4f9-config\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.795502 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9080a9b0-9613-4077-bbbc-6ff558b4180c-serving-cert\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.798705 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.799048 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: 
\"kubernetes.io/secret/7d6c295a-54fe-47c5-b587-940db5e2589b-machine-approver-tls\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.799618 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.801722 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.802140 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-service-ca-bundle\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.803881 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.802452 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.804432 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.802381 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-config\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.804794 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-serving-cert\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.804907 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-console-config\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.805138 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/55206b88-053e-4616-bfb0-82f5d8a2d4f9-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.805773 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/2c068a00-4049-443f-b5a6-deec4c086d13-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.809729 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.815874 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.818749 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.821191 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.821418 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.822033 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.823260 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.823845 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.828721 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.836015 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-n8cfh"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.837846 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wrmdd"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.838672 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.839784 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vrhjw"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.840039 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.841458 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rwdmj"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.842088 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-zbp4l"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.843923 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-clhb8"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.844891 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.847570 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.852031 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.862919 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.869744 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.871961 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.872357 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.876325 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wndtt"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.877945 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.879513 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-p297m"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.880736 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5zbpj"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.881565 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.882833 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.883691 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x96hr"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.884647 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-7fkdm"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.888898 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/08f894ed-4dd9-4e81-8051-4a1024f93a0b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.888973 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tml9z\" (UniqueName: \"kubernetes.io/projected/f4d2ff4d-7d50-461f-8ea3-57fdd1be7214-kube-api-access-tml9z\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnzkd\" (UID: \"f4d2ff4d-7d50-461f-8ea3-57fdd1be7214\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889010 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/08f894ed-4dd9-4e81-8051-4a1024f93a0b-images\") pod 
\"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889104 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3973a7c-f509-4769-93a6-4f71f99cc515-trusted-ca\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889161 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3973a7c-f509-4769-93a6-4f71f99cc515-serving-cert\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889253 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3973a7c-f509-4769-93a6-4f71f99cc515-config\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889292 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-config\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889376 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/08f894ed-4dd9-4e81-8051-4a1024f93a0b-proxy-tls\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889418 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889466 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8b9b210a-0ed1-438e-af3b-14c5db643e15-bound-sa-token\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889680 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:22 crc kubenswrapper[4900]: 
I0127 12:27:22.889722 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-srv-cert\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889757 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9tvq\" (UniqueName: \"kubernetes.io/projected/0c526262-6ee4-4526-91d7-614b3cd91082-kube-api-access-p9tvq\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889795 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-stats-auth\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889841 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-client\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889887 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g7cq\" (UniqueName: \"kubernetes.io/projected/0703acf1-af71-4249-b1ef-e19c6beb4d86-kube-api-access-7g7cq\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889919 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh4ss\" (UniqueName: \"kubernetes.io/projected/a3973a7c-f509-4769-93a6-4f71f99cc515-kube-api-access-sh4ss\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.889984 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-566sl\" (UniqueName: \"kubernetes.io/projected/557eda06-2394-4c8c-82c1-dbc08a122232-kube-api-access-566sl\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890224 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fc6f\" (UniqueName: \"kubernetes.io/projected/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-kube-api-access-6fc6f\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890267 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: 
\"kubernetes.io/empty-dir/ef778856-8cbe-4b13-90f6-74bd28af2c86-available-featuregates\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890306 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b9b210a-0ed1-438e-af3b-14c5db643e15-trusted-ca\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890333 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/557eda06-2394-4c8c-82c1-dbc08a122232-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890362 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-profile-collector-cert\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890449 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef778856-8cbe-4b13-90f6-74bd28af2c86-serving-cert\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890510 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-proxy-tls\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890538 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krjvt\" (UniqueName: \"kubernetes.io/projected/6561ed20-6634-4df7-891e-3c7f3e9427b0-kube-api-access-krjvt\") pod \"multus-admission-controller-857f4d67dd-7fkdm\" (UID: \"6561ed20-6634-4df7-891e-3c7f3e9427b0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890722 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-metrics-certs\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890882 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b9b210a-0ed1-438e-af3b-14c5db643e15-metrics-tls\") pod 
\"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890916 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bddbs\" (UniqueName: \"kubernetes.io/projected/8b9b210a-0ed1-438e-af3b-14c5db643e15-kube-api-access-bddbs\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.890990 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9879637-6084-4966-9f47-2b99f22ea469-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-2g4jf\" (UID: \"d9879637-6084-4966-9f47-2b99f22ea469\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891043 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-service-ca\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891086 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rwsl\" (UniqueName: \"kubernetes.io/projected/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-kube-api-access-7rwsl\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891125 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-default-certificate\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891165 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6561ed20-6634-4df7-891e-3c7f3e9427b0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-7fkdm\" (UID: \"6561ed20-6634-4df7-891e-3c7f3e9427b0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891195 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sf77\" (UniqueName: \"kubernetes.io/projected/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-kube-api-access-5sf77\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891220 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0703acf1-af71-4249-b1ef-e19c6beb4d86-service-ca-bundle\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " 
pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891248 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-ca\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891272 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8nbg\" (UniqueName: \"kubernetes.io/projected/ef778856-8cbe-4b13-90f6-74bd28af2c86-kube-api-access-k8nbg\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891358 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvjm6\" (UniqueName: \"kubernetes.io/projected/d9879637-6084-4966-9f47-2b99f22ea469-kube-api-access-wvjm6\") pod \"cluster-samples-operator-665b6dd947-2g4jf\" (UID: \"d9879637-6084-4966-9f47-2b99f22ea469\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891391 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/557eda06-2394-4c8c-82c1-dbc08a122232-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891427 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891470 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f4d2ff4d-7d50-461f-8ea3-57fdd1be7214-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnzkd\" (UID: \"f4d2ff4d-7d50-461f-8ea3-57fdd1be7214\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891509 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg79h\" (UniqueName: \"kubernetes.io/projected/08f894ed-4dd9-4e81-8051-4a1024f93a0b-kube-api-access-hg79h\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.891534 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-serving-cert\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.895315 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.902779 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/557eda06-2394-4c8c-82c1-dbc08a122232-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.903804 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ef778856-8cbe-4b13-90f6-74bd28af2c86-available-featuregates\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.904780 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/08f894ed-4dd9-4e81-8051-4a1024f93a0b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.904960 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3973a7c-f509-4769-93a6-4f71f99cc515-serving-cert\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.905404 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.905885 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.906540 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/557eda06-2394-4c8c-82c1-dbc08a122232-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.908165 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef778856-8cbe-4b13-90f6-74bd28af2c86-serving-cert\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.910250 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-metrics-certs\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.910704 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-service-ca\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.911315 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0703acf1-af71-4249-b1ef-e19c6beb4d86-service-ca-bundle\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.913503 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-default-certificate\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.913729 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0703acf1-af71-4249-b1ef-e19c6beb4d86-stats-auth\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.920332 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3973a7c-f509-4769-93a6-4f71f99cc515-trusted-ca\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.920442 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3973a7c-f509-4769-93a6-4f71f99cc515-config\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.933259 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-serving-cert\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.937184 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.938072 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-client\") pod 
\"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.938373 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.941993 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.944714 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-lld9r"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.968620 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.968909 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq"] Jan 27 12:27:22 crc kubenswrapper[4900]: I0127 12:27:22.974228 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.003562 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.004704 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.004827 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fw6lf"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.007837 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.008392 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-config\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.009643 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-etcd-ca\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.018249 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8kcp8"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.020661 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.026167 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.028070 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-authentication-operator/authentication-operator-69f744f599-s7kkg"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.029041 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.030841 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f4d2ff4d-7d50-461f-8ea3-57fdd1be7214-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnzkd\" (UID: \"f4d2ff4d-7d50-461f-8ea3-57fdd1be7214\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.031036 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.031974 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cr4gz"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.032925 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.033885 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.034842 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.035817 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.036717 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-75clq"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.037669 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.037702 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-nssds"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.038316 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-nssds" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.038738 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nlrz5"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.039747 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.039861 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-nssds"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.040703 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nlrz5"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.045856 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-599cn"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.046634 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-599cn"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.046730 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-599cn" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.048613 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.054510 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgmzb\" (UniqueName: \"kubernetes.io/projected/2b4fdd15-bb83-4db3-bf15-2101476b4000-kube-api-access-cgmzb\") pod \"oauth-openshift-558db77b4-wndtt\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.062192 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.082167 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.103442 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.111886 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.115773 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-proxy-tls\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.122209 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.139154 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.142378 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.148744 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b9b210a-0ed1-438e-af3b-14c5db643e15-metrics-tls\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.176701 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.182392 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.185249 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b9b210a-0ed1-438e-af3b-14c5db643e15-trusted-ca\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.204367 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.215111 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/08f894ed-4dd9-4e81-8051-4a1024f93a0b-images\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.222487 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.242172 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.255207 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/08f894ed-4dd9-4e81-8051-4a1024f93a0b-proxy-tls\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.257255 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-n8cfh"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.262079 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 27 12:27:23 crc kubenswrapper[4900]: W0127 12:27:23.266423 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf693c83e_d25c_4f46_bbb6_fd38195cde95.slice/crio-59013904bd1cec7f4cfdb89246afe7e3b64cc8c8e8bf66e716828c626139650e 
WatchSource:0}: Error finding container 59013904bd1cec7f4cfdb89246afe7e3b64cc8c8e8bf66e716828c626139650e: Status 404 returned error can't find the container with id 59013904bd1cec7f4cfdb89246afe7e3b64cc8c8e8bf66e716828c626139650e Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.280857 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.288792 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9879637-6084-4966-9f47-2b99f22ea469-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-2g4jf\" (UID: \"d9879637-6084-4966-9f47-2b99f22ea469\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.319083 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q79z5\" (UniqueName: \"kubernetes.io/projected/6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af-kube-api-access-q79z5\") pod \"apiserver-7bbb656c7d-csvht\" (UID: \"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.342226 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgxn4\" (UniqueName: \"kubernetes.io/projected/511728ec-f571-4333-b0e2-c6a897c6c2d5-kube-api-access-hgxn4\") pod \"apiserver-76f77b778f-vrhjw\" (UID: \"511728ec-f571-4333-b0e2-c6a897c6c2d5\") " pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.343035 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.358873 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wndtt"] Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.362118 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: W0127 12:27:23.369617 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b4fdd15_bb83_4db3_bf15_2101476b4000.slice/crio-a699391860187feb1790c0c70ae02876e10bae3ae6add88c7d931a4f58bb2f78 WatchSource:0}: Error finding container a699391860187feb1790c0c70ae02876e10bae3ae6add88c7d931a4f58bb2f78: Status 404 returned error can't find the container with id a699391860187feb1790c0c70ae02876e10bae3ae6add88c7d931a4f58bb2f78 Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.380623 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.402002 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.421239 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.429381 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" 
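The W-level manager.go:1169 entries above are cAdvisor's container watcher racing pod startup: the cgroup watch event for a newly created crio container arrives before the container is queryable, so the lookup returns 404. In a startup log like this one the warning appears transient, since the same pods report ContainerStarted moments later. A sketch, assuming Python 3 and a kubelet.log path, that collects the affected container IDs so they can be cross-checked against later PLEG events:

#!/usr/bin/env python3
# Collect the 64-hex container IDs behind cAdvisor's "Failed to process watch
# event ... Status 404" warnings. Sketch only; path and regex are assumptions.
import re

WATCH_404 = re.compile(r"Error finding container ([0-9a-f]{64}):")

ids = set()
with open("kubelet.log", encoding="utf-8", errors="replace") as fh:
    for line in fh:
        if "Failed to process watch event" in line:
            m = WATCH_404.search(line)
            if m:
                ids.add(m.group(1))

print(f"{len(ids)} container(s) hit the watch race:")
for cid in sorted(ids):
    print(" ", cid)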
(UniqueName: \"kubernetes.io/secret/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-srv-cert\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.441354 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.447587 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-profile-collector-cert\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.461821 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.478115 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.480937 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.480980 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.480941 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.481118 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.482051 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.501879 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.514492 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6561ed20-6634-4df7-891e-3c7f3e9427b0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-7fkdm\" (UID: \"6561ed20-6634-4df7-891e-3c7f3e9427b0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.526206 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.527445 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.543485 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.561717 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.581200 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.583820 4900 generic.go:334] "Generic (PLEG): container finished" podID="206345a2-ea7e-4a32-8c93-414290ba5c92" containerID="98ea42db85f6c8d4855a0abdebfb9b1e325a58afb23effa468740cfcd78a8ff1" exitCode=0 Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.583898 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerDied","Data":"98ea42db85f6c8d4855a0abdebfb9b1e325a58afb23effa468740cfcd78a8ff1"} Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.585480 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" event={"ID":"f693c83e-d25c-4f46-bbb6-fd38195cde95","Type":"ContainerStarted","Data":"8faa920a20e23b45c8b27f4a265ebd3ae522098b0d9dd02c2128f1841e648fc7"} Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.585511 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" event={"ID":"f693c83e-d25c-4f46-bbb6-fd38195cde95","Type":"ContainerStarted","Data":"59013904bd1cec7f4cfdb89246afe7e3b64cc8c8e8bf66e716828c626139650e"} Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.585716 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.587118 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" event={"ID":"2b4fdd15-bb83-4db3-bf15-2101476b4000","Type":"ContainerStarted","Data":"a699391860187feb1790c0c70ae02876e10bae3ae6add88c7d931a4f58bb2f78"} Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.587837 4900 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-n8cfh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.587920 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" podUID="f693c83e-d25c-4f46-bbb6-fd38195cde95" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.601176 4900 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.624969 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.662232 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.672710 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.693846 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.708339 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.714698 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.722775 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.744802 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.762810 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.780122 4900 request.go:700] Waited for 1.006601841s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/secrets?fieldSelector=metadata.name%3Dserving-cert&limit=500&resourceVersion=0 Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.782269 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.801417 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.820885 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.839543 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vrhjw"] Jan 27 12:27:23 crc kubenswrapper[4900]: W0127 12:27:23.848811 4900 manager.go:1169] Failed to process watch event 
Jan 27 12:27:23 crc kubenswrapper[4900]: W0127 12:27:23.848811 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod511728ec_f571_4333_b0e2_c6a897c6c2d5.slice/crio-63b49c6f4d3e480f2b4d86a040f91f8c10e88cb7e850a277d1f93cf89d213adf WatchSource:0}: Error finding container 63b49c6f4d3e480f2b4d86a040f91f8c10e88cb7e850a277d1f93cf89d213adf: Status 404 returned error can't find the container with id 63b49c6f4d3e480f2b4d86a040f91f8c10e88cb7e850a277d1f93cf89d213adf
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.857472 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6822\" (UniqueName: \"kubernetes.io/projected/7d6c295a-54fe-47c5-b587-940db5e2589b-kube-api-access-h6822\") pod \"machine-approver-56656f9798-st975\" (UID: \"7d6c295a-54fe-47c5-b587-940db5e2589b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.875884 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcq7s\" (UniqueName: \"kubernetes.io/projected/2c068a00-4049-443f-b5a6-deec4c086d13-kube-api-access-bcq7s\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.901568 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggw97\" (UniqueName: \"kubernetes.io/projected/f5eb80d8-5f29-4bd8-837f-c55402a17fe6-kube-api-access-ggw97\") pod \"openshift-apiserver-operator-796bbdcf4f-tc5cb\" (UID: \"f5eb80d8-5f29-4bd8-837f-c55402a17fe6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.914077 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht"]
Jan 27 12:27:23 crc kubenswrapper[4900]: W0127 12:27:23.918670 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ff9b4ed_de5c_4fb9_9eea_9bb84d2f11af.slice/crio-74ac4b9ad975838ab19df219e2f4cc15dccde058baedf140ba94cacb9db6bae9 WatchSource:0}: Error finding container 74ac4b9ad975838ab19df219e2f4cc15dccde058baedf140ba94cacb9db6bae9: Status 404 returned error can't find the container with id 74ac4b9ad975838ab19df219e2f4cc15dccde058baedf140ba94cacb9db6bae9
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.919562 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drr87\" (UniqueName: \"kubernetes.io/projected/6893cb7a-209f-4822-9e82-34ad39c7647f-kube-api-access-drr87\") pod \"console-f9d7485db-5zbpj\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") " pod="openshift-console/console-f9d7485db-5zbpj"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.920438 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.935320 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2c068a00-4049-443f-b5a6-deec4c086d13-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-rfgbs\" (UID: \"2c068a00-4049-443f-b5a6-deec4c086d13\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.941281 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.961296 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.982194 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 27 12:27:23 crc kubenswrapper[4900]: I0127 12:27:23.988932 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5zbpj"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.001451 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.015081 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.026974 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.053892 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvgvk\" (UniqueName: \"kubernetes.io/projected/55206b88-053e-4616-bfb0-82f5d8a2d4f9-kube-api-access-bvgvk\") pod \"machine-api-operator-5694c8668f-wrmdd\" (UID: \"55206b88-053e-4616-bfb0-82f5d8a2d4f9\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.057174 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69926\" (UniqueName: \"kubernetes.io/projected/75d8fa11-eb06-4aae-8e96-3bb4328d69d7-kube-api-access-69926\") pod \"downloads-7954f5f757-zbp4l\" (UID: \"75d8fa11-eb06-4aae-8e96-3bb4328d69d7\") " pod="openshift-console/downloads-7954f5f757-zbp4l"
Jan 27 12:27:24 crc kubenswrapper[4900]: W0127 12:27:24.064983 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d6c295a_54fe_47c5_b587_940db5e2589b.slice/crio-66690bceee32a8bc93d4819a653121841be850b95d21ae638afcd699ac0c1bd3 WatchSource:0}: Error finding container 66690bceee32a8bc93d4819a653121841be850b95d21ae638afcd699ac0c1bd3: Status 404 returned error can't find the container with id 66690bceee32a8bc93d4819a653121841be850b95d21ae638afcd699ac0c1bd3
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.083692 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlwj8\" (UniqueName: \"kubernetes.io/projected/9080a9b0-9613-4077-bbbc-6ff558b4180c-kube-api-access-rlwj8\") pod \"route-controller-manager-6576b87f9c-52vsm\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.100522 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm425\" (UniqueName: \"kubernetes.io/projected/8a55139c-7a3d-4800-b2d1-8cc5270d6eaa-kube-api-access-sm425\") pod \"authentication-operator-69f744f599-s7kkg\" (UID: \"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.116737 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-zbp4l"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.126088 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k2ks\" (UniqueName: \"kubernetes.io/projected/e4c1d903-be2e-4216-bc7a-e17c23bd6e63-kube-api-access-9k2ks\") pod \"dns-operator-744455d44c-rwdmj\" (UID: \"e4c1d903-be2e-4216-bc7a-e17c23bd6e63\") " pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.127996 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.142916 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.156956 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.161115 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.177641 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb"]
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.180461 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.200872 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.222110 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.245678 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.256929 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.265641 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.276614 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5zbpj"]
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.276789 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.282074 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.301021 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.323861 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.344007 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.344161 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj"
Jan 27 12:27:24 crc kubenswrapper[4900]: W0127 12:27:24.350210 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6893cb7a_209f_4822_9e82_34ad39c7647f.slice/crio-9bc72981497f9fae182be4cf549f713490b6afca9f945dfd443674a18c7e4c8a WatchSource:0}: Error finding container 9bc72981497f9fae182be4cf549f713490b6afca9f945dfd443674a18c7e4c8a: Status 404 returned error can't find the container with id 9bc72981497f9fae182be4cf549f713490b6afca9f945dfd443674a18c7e4c8a
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.360848 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.448167 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.448640 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.448881 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.457190 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.462254 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.508598 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.515816 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs"]
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.523420 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.541377 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.541583 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-zbp4l"]
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.562081 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.581330 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.611696 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.614972 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-console/downloads-7954f5f757-zbp4l" event={"ID":"75d8fa11-eb06-4aae-8e96-3bb4328d69d7","Type":"ContainerStarted","Data":"cd1c89822ddb6ae2f0bd6e0b5e86e0e70aafbb39511d0ae96e1bcd92ddfcde5e"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.621951 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-sysctl-allowlist" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.622589 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" event={"ID":"2c068a00-4049-443f-b5a6-deec4c086d13","Type":"ContainerStarted","Data":"db633e904492a200d0e7990416fabc0741420782a6192f7b8ca106183e34824c"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.626016 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" event={"ID":"7d6c295a-54fe-47c5-b587-940db5e2589b","Type":"ContainerStarted","Data":"66690bceee32a8bc93d4819a653121841be850b95d21ae638afcd699ac0c1bd3"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.626678 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-s7kkg"] Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.636346 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" event={"ID":"f5eb80d8-5f29-4bd8-837f-c55402a17fe6","Type":"ContainerStarted","Data":"927e20afa247ff74245908ac80b7848a9537dd20d4f99c16bb73fdccdccc62f6"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.662354 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" event={"ID":"206345a2-ea7e-4a32-8c93-414290ba5c92","Type":"ContainerStarted","Data":"89960504b6ca18533ed50174ef37491b3e01c9effad03d5a3ccd2ec3ceba877b"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.673256 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9tvq\" (UniqueName: \"kubernetes.io/projected/0c526262-6ee4-4526-91d7-614b3cd91082-kube-api-access-p9tvq\") pod \"marketplace-operator-79b997595-8kcp8\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.678636 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tml9z\" (UniqueName: \"kubernetes.io/projected/f4d2ff4d-7d50-461f-8ea3-57fdd1be7214-kube-api-access-tml9z\") pod \"control-plane-machine-set-operator-78cbb6b69f-nnzkd\" (UID: \"f4d2ff4d-7d50-461f-8ea3-57fdd1be7214\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.686830 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5zbpj" event={"ID":"6893cb7a-209f-4822-9e82-34ad39c7647f","Type":"ContainerStarted","Data":"9bc72981497f9fae182be4cf549f713490b6afca9f945dfd443674a18c7e4c8a"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.689372 4900 generic.go:334] "Generic (PLEG): container finished" podID="6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af" containerID="d6d780d7f0c175bab0d6c5f2ad187534854e085ceee566be8503bac0d3982649" exitCode=0 Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.690035 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" event={"ID":"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af","Type":"ContainerDied","Data":"d6d780d7f0c175bab0d6c5f2ad187534854e085ceee566be8503bac0d3982649"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.690072 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" event={"ID":"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af","Type":"ContainerStarted","Data":"74ac4b9ad975838ab19df219e2f4cc15dccde058baedf140ba94cacb9db6bae9"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.695264 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" event={"ID":"2b4fdd15-bb83-4db3-bf15-2101476b4000","Type":"ContainerStarted","Data":"50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.696008 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.696762 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh4ss\" (UniqueName: \"kubernetes.io/projected/a3973a7c-f509-4769-93a6-4f71f99cc515-kube-api-access-sh4ss\") pod \"console-operator-58897d9998-x96hr\" (UID: \"a3973a7c-f509-4769-93a6-4f71f99cc515\") " pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.698355 4900 generic.go:334] "Generic (PLEG): container finished" podID="511728ec-f571-4333-b0e2-c6a897c6c2d5" containerID="3a114d1f6eb0d4cd75176976ae90ff8955899beb367ad2baa962d57097d75cad" exitCode=0 Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.699302 4900 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-n8cfh container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.699339 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" podUID="f693c83e-d25c-4f46-bbb6-fd38195cde95" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.699385 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" event={"ID":"511728ec-f571-4333-b0e2-c6a897c6c2d5","Type":"ContainerDied","Data":"3a114d1f6eb0d4cd75176976ae90ff8955899beb367ad2baa962d57097d75cad"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.699422 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" event={"ID":"511728ec-f571-4333-b0e2-c6a897c6c2d5","Type":"ContainerStarted","Data":"63b49c6f4d3e480f2b4d86a040f91f8c10e88cb7e850a277d1f93cf89d213adf"} Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.700320 4900 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-wndtt container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" start-of-body= Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.700345 
4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" podUID="2b4fdd15-bb83-4db3-bf15-2101476b4000" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.715617 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wrmdd"] Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.724759 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fc6f\" (UniqueName: \"kubernetes.io/projected/ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7-kube-api-access-6fc6f\") pod \"etcd-operator-b45778765-fw6lf\" (UID: \"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.732783 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.738293 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rwdmj"] Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.757354 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-566sl\" (UniqueName: \"kubernetes.io/projected/557eda06-2394-4c8c-82c1-dbc08a122232-kube-api-access-566sl\") pod \"openshift-controller-manager-operator-756b6f6bc6-vht44\" (UID: \"557eda06-2394-4c8c-82c1-dbc08a122232\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.764843 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g7cq\" (UniqueName: \"kubernetes.io/projected/0703acf1-af71-4249-b1ef-e19c6beb4d86-kube-api-access-7g7cq\") pod \"router-default-5444994796-zvswh\" (UID: \"0703acf1-af71-4249-b1ef-e19c6beb4d86\") " pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.792727 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8b9b210a-0ed1-438e-af3b-14c5db643e15-bound-sa-token\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.799168 4900 request.go:700] Waited for 1.893358231s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/serviceaccounts/ingress-operator/token Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.807669 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"] Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.819260 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krjvt\" (UniqueName: \"kubernetes.io/projected/6561ed20-6634-4df7-891e-3c7f3e9427b0-kube-api-access-krjvt\") pod \"multus-admission-controller-857f4d67dd-7fkdm\" (UID: \"6561ed20-6634-4df7-891e-3c7f3e9427b0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" Jan 27 12:27:24 crc 
kubenswrapper[4900]: I0127 12:27:24.820331 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.827845 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bddbs\" (UniqueName: \"kubernetes.io/projected/8b9b210a-0ed1-438e-af3b-14c5db643e15-kube-api-access-bddbs\") pod \"ingress-operator-5b745b69d9-p297m\" (UID: \"8b9b210a-0ed1-438e-af3b-14c5db643e15\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.837128 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.841670 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.852475 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvjm6\" (UniqueName: \"kubernetes.io/projected/d9879637-6084-4966-9f47-2b99f22ea469-kube-api-access-wvjm6\") pod \"cluster-samples-operator-665b6dd947-2g4jf\" (UID: \"d9879637-6084-4966-9f47-2b99f22ea469\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.853921 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.860847 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.872596 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg79h\" (UniqueName: \"kubernetes.io/projected/08f894ed-4dd9-4e81-8051-4a1024f93a0b-kube-api-access-hg79h\") pod \"machine-config-operator-74547568cd-b4rxs\" (UID: \"08f894ed-4dd9-4e81-8051-4a1024f93a0b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.884745 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sf77\" (UniqueName: \"kubernetes.io/projected/64e28912-899a-4d7f-8c6a-dffc3ca9f1b7-kube-api-access-5sf77\") pod \"machine-config-controller-84d6567774-gs2tm\" (UID: \"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.906556 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rwsl\" (UniqueName: \"kubernetes.io/projected/398f26a6-4944-4c9b-926a-b4ef22eb2a1f-kube-api-access-7rwsl\") pod \"catalog-operator-68c6474976-zdrjw\" (UID: \"398f26a6-4944-4c9b-926a-b4ef22eb2a1f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.907521 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.928707 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.928958 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8nbg\" (UniqueName: \"kubernetes.io/projected/ef778856-8cbe-4b13-90f6-74bd28af2c86-kube-api-access-k8nbg\") pod \"openshift-config-operator-7777fb866f-v5vsd\" (UID: \"ef778856-8cbe-4b13-90f6-74bd28af2c86\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.943631 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.953212 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.957328 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.965510 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.975828 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.985269 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 27 12:27:24 crc kubenswrapper[4900]: I0127 12:27:24.996771 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.017801 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.020860 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.035175 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8kcp8"] Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.065275 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.072517 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.081184 4900 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 27 12:27:25 crc kubenswrapper[4900]: W0127 12:27:25.096740 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0703acf1_af71_4249_b1ef_e19c6beb4d86.slice/crio-b874a0d957cda638e23e478d9343c009d4fb7cf427f8e0bb797a12a4070e7dc0 WatchSource:0}: Error finding container b874a0d957cda638e23e478d9343c009d4fb7cf427f8e0bb797a12a4070e7dc0: Status 404 returned error can't find the container with id b874a0d957cda638e23e478d9343c009d4fb7cf427f8e0bb797a12a4070e7dc0 Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.103282 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.105190 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:25 crc kubenswrapper[4900]: W0127 12:27:25.116724 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c526262_6ee4_4526_91d7_614b3cd91082.slice/crio-2de967c1c9520900044bf0ef2256ff472c0eedb60be4f81e5890c1e28c42cee8 WatchSource:0}: Error finding container 2de967c1c9520900044bf0ef2256ff472c0eedb60be4f81e5890c1e28c42cee8: Status 404 returned error can't find the container with id 2de967c1c9520900044bf0ef2256ff472c0eedb60be4f81e5890c1e28c42cee8 Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.120823 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.153555 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.211162 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.211531 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.212938 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.213912 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.221126 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.241526 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.265616 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.283923 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408045 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-certificates\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408123 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9a6a8e52-38d8-41a6-863f-78255609c063-profile-collector-cert\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408145 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-installation-pull-secrets\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408165 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/db5a985c-5b4c-4ab5-ab7c-61b356b88494-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hffvz\" (UID: \"db5a985c-5b4c-4ab5-ab7c-61b356b88494\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408185 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt9mf\" (UniqueName: \"kubernetes.io/projected/db5a985c-5b4c-4ab5-ab7c-61b356b88494-kube-api-access-zt9mf\") pod \"package-server-manager-789f6589d5-hffvz\" (UID: \"db5a985c-5b4c-4ab5-ab7c-61b356b88494\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408377 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-bound-sa-token\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408468 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-tls\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408562 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-ca-trust-extracted\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408633 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-trusted-ca\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408662 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mthgq\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-kube-api-access-mthgq\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408679 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9a6a8e52-38d8-41a6-863f-78255609c063-srv-cert\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408697 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc727\" (UniqueName: \"kubernetes.io/projected/9a6a8e52-38d8-41a6-863f-78255609c063-kube-api-access-hc727\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.408799 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.413106 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-27 12:27:25.913091486 +0000 UTC m=+73.150119696 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.556860 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.556981 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-csi-data-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557020 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-mountpoint-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557073 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/4a0de235-4f39-4196-93a2-a4e7e1637c19-signing-key\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557114 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/176e188c-fe42-4703-8b29-2ad483a60231-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557129 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36d27b4d-5f64-4436-8fd1-54243b90a439-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557191 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-bound-sa-token\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 
crc kubenswrapper[4900]: I0127 12:27:25.557206 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/977b9e9a-e953-4ec8-bd40-382b18f806d1-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557261 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-tls\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557301 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/4a0de235-4f39-4196-93a2-a4e7e1637c19-signing-cabundle\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557339 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f735b40-5dd4-4411-962e-c948bfc18518-config\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557356 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fq8k\" (UniqueName: \"kubernetes.io/projected/1a5d7b57-de98-445e-83b6-1ff0eb859e01-kube-api-access-8fq8k\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557371 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/176e188c-fe42-4703-8b29-2ad483a60231-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557394 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twhs2\" (UniqueName: \"kubernetes.io/projected/8dd67d09-7a99-4971-a05e-1afc31b0afb8-kube-api-access-twhs2\") pod \"ingress-canary-nssds\" (UID: \"8dd67d09-7a99-4971-a05e-1afc31b0afb8\") " pod="openshift-ingress-canary/ingress-canary-nssds" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557410 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/665d09a8-9764-4b5d-975e-5c96fd671dd5-tmpfs\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557425 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f735b40-5dd4-4411-962e-c948bfc18518-serving-cert\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557475 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea673ed9-5530-4b5d-8997-403f903d27a6-config-volume\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557488 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/176e188c-fe42-4703-8b29-2ad483a60231-config\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557567 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8dd67d09-7a99-4971-a05e-1afc31b0afb8-cert\") pod \"ingress-canary-nssds\" (UID: \"8dd67d09-7a99-4971-a05e-1afc31b0afb8\") " pod="openshift-ingress-canary/ingress-canary-nssds" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557629 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-registration-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557657 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-ca-trust-extracted\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557690 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a271161a-8adc-4b11-a132-03c70bed453e-node-bootstrap-token\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557717 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-trusted-ca\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557734 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v88x\" (UniqueName: 
\"kubernetes.io/projected/3f735b40-5dd4-4411-962e-c948bfc18518-kube-api-access-5v88x\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557762 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mthgq\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-kube-api-access-mthgq\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557779 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9a6a8e52-38d8-41a6-863f-78255609c063-srv-cert\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557795 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc727\" (UniqueName: \"kubernetes.io/projected/9a6a8e52-38d8-41a6-863f-78255609c063-kube-api-access-hc727\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557830 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/419258b7-ee70-4a6a-afdb-e35cd0570fc2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557861 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlv44\" (UniqueName: \"kubernetes.io/projected/419258b7-ee70-4a6a-afdb-e35cd0570fc2-kube-api-access-wlv44\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.557939 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhp9v\" (UniqueName: \"kubernetes.io/projected/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-kube-api-access-zhp9v\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.558027 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.057999296 +0000 UTC m=+73.295027556 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558084 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-socket-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558187 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz4l7\" (UniqueName: \"kubernetes.io/projected/bd0dd2d2-a1be-492e-a685-eca20df9dca4-kube-api-access-sz4l7\") pod \"migrator-59844c95c7-pdc9s\" (UID: \"bd0dd2d2-a1be-492e-a685-eca20df9dca4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558234 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrl8b\" (UniqueName: \"kubernetes.io/projected/a271161a-8adc-4b11-a132-03c70bed453e-kube-api-access-wrl8b\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558284 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558340 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtckq\" (UniqueName: \"kubernetes.io/projected/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-kube-api-access-qtckq\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558374 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d27b4d-5f64-4436-8fd1-54243b90a439-config\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558403 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/665d09a8-9764-4b5d-975e-5c96fd671dd5-apiservice-cert\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 
12:27:25.558435 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/977b9e9a-e953-4ec8-bd40-382b18f806d1-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558453 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxrsb\" (UniqueName: \"kubernetes.io/projected/ea673ed9-5530-4b5d-8997-403f903d27a6-kube-api-access-zxrsb\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558469 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36d27b4d-5f64-4436-8fd1-54243b90a439-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558562 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/419258b7-ee70-4a6a-afdb-e35cd0570fc2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558601 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1a5d7b57-de98-445e-83b6-1ff0eb859e01-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558628 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-certificates\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558653 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-config-volume\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558671 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/977b9e9a-e953-4ec8-bd40-382b18f806d1-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:25 crc kubenswrapper[4900]: 
I0127 12:27:25.558688 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea673ed9-5530-4b5d-8997-403f903d27a6-secret-volume\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558741 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1a5d7b57-de98-445e-83b6-1ff0eb859e01-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558772 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/665d09a8-9764-4b5d-975e-5c96fd671dd5-webhook-cert\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558794 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9a6a8e52-38d8-41a6-863f-78255609c063-profile-collector-cert\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558813 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a271161a-8adc-4b11-a132-03c70bed453e-certs\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558862 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/1a5d7b57-de98-445e-83b6-1ff0eb859e01-ready\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558892 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-installation-pull-secrets\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558911 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/db5a985c-5b4c-4ab5-ab7c-61b356b88494-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hffvz\" (UID: \"db5a985c-5b4c-4ab5-ab7c-61b356b88494\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.558932 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-zt9mf\" (UniqueName: \"kubernetes.io/projected/db5a985c-5b4c-4ab5-ab7c-61b356b88494-kube-api-access-zt9mf\") pod \"package-server-manager-789f6589d5-hffvz\" (UID: \"db5a985c-5b4c-4ab5-ab7c-61b356b88494\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.559023 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28db9\" (UniqueName: \"kubernetes.io/projected/665d09a8-9764-4b5d-975e-5c96fd671dd5-kube-api-access-28db9\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.561947 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.061934899 +0000 UTC m=+73.298963109 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.565001 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-plugins-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.565033 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdm2m\" (UniqueName: \"kubernetes.io/projected/4a0de235-4f39-4196-93a2-a4e7e1637c19-kube-api-access-xdm2m\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.565112 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-metrics-tls\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.569677 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-certificates\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.575635 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-tls\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.577303 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-ca-trust-extracted\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.609332 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/db5a985c-5b4c-4ab5-ab7c-61b356b88494-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hffvz\" (UID: \"db5a985c-5b4c-4ab5-ab7c-61b356b88494\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.611002 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-installation-pull-secrets\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.620550 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf"] Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.623016 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-trusted-ca\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.637105 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x96hr"] Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.669407 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-bound-sa-token\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.671414 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.673440 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8dd67d09-7a99-4971-a05e-1afc31b0afb8-cert\") pod \"ingress-canary-nssds\" (UID: \"8dd67d09-7a99-4971-a05e-1afc31b0afb8\") " pod="openshift-ingress-canary/ingress-canary-nssds" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.673929 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: 
\"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-registration-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.674228 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a271161a-8adc-4b11-a132-03c70bed453e-node-bootstrap-token\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.674291 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v88x\" (UniqueName: \"kubernetes.io/projected/3f735b40-5dd4-4411-962e-c948bfc18518-kube-api-access-5v88x\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.674362 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/419258b7-ee70-4a6a-afdb-e35cd0570fc2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.674564 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlv44\" (UniqueName: \"kubernetes.io/projected/419258b7-ee70-4a6a-afdb-e35cd0570fc2-kube-api-access-wlv44\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.674861 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhp9v\" (UniqueName: \"kubernetes.io/projected/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-kube-api-access-zhp9v\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.677296 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-socket-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.678257 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-registration-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.680822 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-socket-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " 
pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.681091 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.181026359 +0000 UTC m=+73.418054569 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.682496 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz4l7\" (UniqueName: \"kubernetes.io/projected/bd0dd2d2-a1be-492e-a685-eca20df9dca4-kube-api-access-sz4l7\") pod \"migrator-59844c95c7-pdc9s\" (UID: \"bd0dd2d2-a1be-492e-a685-eca20df9dca4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709383 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrl8b\" (UniqueName: \"kubernetes.io/projected/a271161a-8adc-4b11-a132-03c70bed453e-kube-api-access-wrl8b\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709501 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709560 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtckq\" (UniqueName: \"kubernetes.io/projected/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-kube-api-access-qtckq\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709657 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d27b4d-5f64-4436-8fd1-54243b90a439-config\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709697 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/665d09a8-9764-4b5d-975e-5c96fd671dd5-apiservice-cert\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709731 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/977b9e9a-e953-4ec8-bd40-382b18f806d1-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709755 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxrsb\" (UniqueName: \"kubernetes.io/projected/ea673ed9-5530-4b5d-8997-403f903d27a6-kube-api-access-zxrsb\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709777 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36d27b4d-5f64-4436-8fd1-54243b90a439-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709817 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/419258b7-ee70-4a6a-afdb-e35cd0570fc2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709849 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1a5d7b57-de98-445e-83b6-1ff0eb859e01-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709877 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-config-volume\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709896 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/977b9e9a-e953-4ec8-bd40-382b18f806d1-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709920 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea673ed9-5530-4b5d-8997-403f903d27a6-secret-volume\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709951 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/1a5d7b57-de98-445e-83b6-1ff0eb859e01-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.709977 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/665d09a8-9764-4b5d-975e-5c96fd671dd5-webhook-cert\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710014 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a271161a-8adc-4b11-a132-03c70bed453e-certs\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710042 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/1a5d7b57-de98-445e-83b6-1ff0eb859e01-ready\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710112 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28db9\" (UniqueName: \"kubernetes.io/projected/665d09a8-9764-4b5d-975e-5c96fd671dd5-kube-api-access-28db9\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710139 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-plugins-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710157 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdm2m\" (UniqueName: \"kubernetes.io/projected/4a0de235-4f39-4196-93a2-a4e7e1637c19-kube-api-access-xdm2m\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710187 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-metrics-tls\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710255 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-csi-data-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710286 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" 
(UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-mountpoint-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710311 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/4a0de235-4f39-4196-93a2-a4e7e1637c19-signing-key\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710349 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/176e188c-fe42-4703-8b29-2ad483a60231-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710369 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36d27b4d-5f64-4436-8fd1-54243b90a439-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710404 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/977b9e9a-e953-4ec8-bd40-382b18f806d1-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710453 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/4a0de235-4f39-4196-93a2-a4e7e1637c19-signing-cabundle\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710504 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f735b40-5dd4-4411-962e-c948bfc18518-config\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710522 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fq8k\" (UniqueName: \"kubernetes.io/projected/1a5d7b57-de98-445e-83b6-1ff0eb859e01-kube-api-access-8fq8k\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710543 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/176e188c-fe42-4703-8b29-2ad483a60231-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") 
" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710567 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twhs2\" (UniqueName: \"kubernetes.io/projected/8dd67d09-7a99-4971-a05e-1afc31b0afb8-kube-api-access-twhs2\") pod \"ingress-canary-nssds\" (UID: \"8dd67d09-7a99-4971-a05e-1afc31b0afb8\") " pod="openshift-ingress-canary/ingress-canary-nssds" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710608 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/665d09a8-9764-4b5d-975e-5c96fd671dd5-tmpfs\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710628 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f735b40-5dd4-4411-962e-c948bfc18518-serving-cert\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710657 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea673ed9-5530-4b5d-8997-403f903d27a6-config-volume\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.710701 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/176e188c-fe42-4703-8b29-2ad483a60231-config\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.711173 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8dd67d09-7a99-4971-a05e-1afc31b0afb8-cert\") pod \"ingress-canary-nssds\" (UID: \"8dd67d09-7a99-4971-a05e-1afc31b0afb8\") " pod="openshift-ingress-canary/ingress-canary-nssds" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.711324 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d27b4d-5f64-4436-8fd1-54243b90a439-config\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.711625 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9a6a8e52-38d8-41a6-863f-78255609c063-srv-cert\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.699253 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/419258b7-ee70-4a6a-afdb-e35cd0570fc2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.711911 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/176e188c-fe42-4703-8b29-2ad483a60231-config\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.712641 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/977b9e9a-e953-4ec8-bd40-382b18f806d1-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.713020 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc727\" (UniqueName: \"kubernetes.io/projected/9a6a8e52-38d8-41a6-863f-78255609c063-kube-api-access-hc727\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.713202 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1a5d7b57-de98-445e-83b6-1ff0eb859e01-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.715332 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-config-volume\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.715874 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1a5d7b57-de98-445e-83b6-1ff0eb859e01-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.717603 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/1a5d7b57-de98-445e-83b6-1ff0eb859e01-ready\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.717817 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.217798664 +0000 UTC m=+73.454826874 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.718329 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mthgq\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-kube-api-access-mthgq\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.719543 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f735b40-5dd4-4411-962e-c948bfc18518-config\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.721195 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-plugins-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.724406 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a271161a-8adc-4b11-a132-03c70bed453e-node-bootstrap-token\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.727434 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-csi-data-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.727501 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-mountpoint-dir\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.734091 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea673ed9-5530-4b5d-8997-403f903d27a6-config-volume\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.734608 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/665d09a8-9764-4b5d-975e-5c96fd671dd5-tmpfs\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: 
\"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.737020 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a271161a-8adc-4b11-a132-03c70bed453e-certs\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.737435 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fw6lf"] Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.737979 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" event={"ID":"9080a9b0-9613-4077-bbbc-6ff558b4180c","Type":"ContainerStarted","Data":"ef5d4c2efcf8578aa8cfb3785df5a32fc9349c2cc687334be69d96a5932ff136"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.742830 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/419258b7-ee70-4a6a-afdb-e35cd0570fc2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.742654 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f735b40-5dd4-4411-962e-c948bfc18518-serving-cert\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.753223 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9a6a8e52-38d8-41a6-863f-78255609c063-profile-collector-cert\") pod \"olm-operator-6b444d44fb-jnwth\" (UID: \"9a6a8e52-38d8-41a6-863f-78255609c063\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.755591 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36d27b4d-5f64-4436-8fd1-54243b90a439-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.756433 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/4a0de235-4f39-4196-93a2-a4e7e1637c19-signing-cabundle\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.756861 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" event={"ID":"55206b88-053e-4616-bfb0-82f5d8a2d4f9","Type":"ContainerStarted","Data":"34fde3d6aa583f73b8dcaa31315d0ff76b0e1f88286aa268db8699f08df60366"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.758112 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea673ed9-5530-4b5d-8997-403f903d27a6-secret-volume\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.759661 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" event={"ID":"f5eb80d8-5f29-4bd8-837f-c55402a17fe6","Type":"ContainerStarted","Data":"d9749c6ec4499dc9bda7d0dadde69d515f60e2d8c2a1961e3cbc407ce8b3134c"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.760510 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v88x\" (UniqueName: \"kubernetes.io/projected/3f735b40-5dd4-4411-962e-c948bfc18518-kube-api-access-5v88x\") pod \"service-ca-operator-777779d784-z5jcj\" (UID: \"3f735b40-5dd4-4411-962e-c948bfc18518\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.762223 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-metrics-tls\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.762520 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/665d09a8-9764-4b5d-975e-5c96fd671dd5-apiservice-cert\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.762933 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/4a0de235-4f39-4196-93a2-a4e7e1637c19-signing-key\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.765775 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhp9v\" (UniqueName: \"kubernetes.io/projected/87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd-kube-api-access-zhp9v\") pod \"dns-default-599cn\" (UID: \"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd\") " pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.769834 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/665d09a8-9764-4b5d-975e-5c96fd671dd5-webhook-cert\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.770469 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" event={"ID":"e4c1d903-be2e-4216-bc7a-e17c23bd6e63","Type":"ContainerStarted","Data":"ac08a16ff29c82486cacff37e381c0113fb5caae7801215302af53ba659c054b"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.772997 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" event={"ID":"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa","Type":"ContainerStarted","Data":"e05a30001ea2e92c20727786492424813b09f26bbefcfd085e0a507ffd94df8b"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.779358 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt9mf\" (UniqueName: \"kubernetes.io/projected/db5a985c-5b4c-4ab5-ab7c-61b356b88494-kube-api-access-zt9mf\") pod \"package-server-manager-789f6589d5-hffvz\" (UID: \"db5a985c-5b4c-4ab5-ab7c-61b356b88494\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.780357 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz4l7\" (UniqueName: \"kubernetes.io/projected/bd0dd2d2-a1be-492e-a685-eca20df9dca4-kube-api-access-sz4l7\") pod \"migrator-59844c95c7-pdc9s\" (UID: \"bd0dd2d2-a1be-492e-a685-eca20df9dca4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.781201 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" event={"ID":"7d6c295a-54fe-47c5-b587-940db5e2589b","Type":"ContainerStarted","Data":"910e3b7b8b09c577c083a37895e08a40194ec571f9a085a356206293927e0248"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.783837 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlv44\" (UniqueName: \"kubernetes.io/projected/419258b7-ee70-4a6a-afdb-e35cd0570fc2-kube-api-access-wlv44\") pod \"kube-storage-version-migrator-operator-b67b599dd-xrbcq\" (UID: \"419258b7-ee70-4a6a-afdb-e35cd0570fc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.785307 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" event={"ID":"2c068a00-4049-443f-b5a6-deec4c086d13","Type":"ContainerStarted","Data":"f251dd88ca98d81d4a64cd61a418f89bd1605ea20d47fc6a6046e9842764493d"} Jan 27 12:27:25 crc kubenswrapper[4900]: W0127 12:27:25.791524 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podccc38bcb_5db5_4b5c_8dd2_357b2cfc33c7.slice/crio-ec8972c0804fba0a33591ce437b3f7a98d1ed5e02d6da0ffe2448bbdecc5569c WatchSource:0}: Error finding container ec8972c0804fba0a33591ce437b3f7a98d1ed5e02d6da0ffe2448bbdecc5569c: Status 404 returned error can't find the container with id ec8972c0804fba0a33591ce437b3f7a98d1ed5e02d6da0ffe2448bbdecc5569c Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.798815 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" event={"ID":"0c526262-6ee4-4526-91d7-614b3cd91082","Type":"ContainerStarted","Data":"2de967c1c9520900044bf0ef2256ff472c0eedb60be4f81e5890c1e28c42cee8"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.806560 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/176e188c-fe42-4703-8b29-2ad483a60231-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.812892 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.814168 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.314139141 +0000 UTC m=+73.551167361 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.814446 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.815703 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.315677395 +0000 UTC m=+73.552705605 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.823076 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrl8b\" (UniqueName: \"kubernetes.io/projected/a271161a-8adc-4b11-a132-03c70bed453e-kube-api-access-wrl8b\") pod \"machine-config-server-75clq\" (UID: \"a271161a-8adc-4b11-a132-03c70bed453e\") " pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.826360 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdm2m\" (UniqueName: \"kubernetes.io/projected/4a0de235-4f39-4196-93a2-a4e7e1637c19-kube-api-access-xdm2m\") pod \"service-ca-9c57cc56f-lld9r\" (UID: \"4a0de235-4f39-4196-93a2-a4e7e1637c19\") " pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.828472 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zvswh" event={"ID":"0703acf1-af71-4249-b1ef-e19c6beb4d86","Type":"ContainerStarted","Data":"b874a0d957cda638e23e478d9343c009d4fb7cf427f8e0bb797a12a4070e7dc0"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.831673 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5zbpj" event={"ID":"6893cb7a-209f-4822-9e82-34ad39c7647f","Type":"ContainerStarted","Data":"50743efa7a52672831dd486b237ed89506e5fb9f872c1518d1a7e681a850be61"} Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.832251 4900 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-wndtt container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" start-of-body= Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.832301 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" podUID="2b4fdd15-bb83-4db3-bf15-2101476b4000" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.847816 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd"] Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.854647 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxrsb\" (UniqueName: \"kubernetes.io/projected/ea673ed9-5530-4b5d-8997-403f903d27a6-kube-api-access-zxrsb\") pod \"collect-profiles-29491935-l6zm9\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.855834 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44"] Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 
12:27:25.862068 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-599cn" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.869158 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28db9\" (UniqueName: \"kubernetes.io/projected/665d09a8-9764-4b5d-975e-5c96fd671dd5-kube-api-access-28db9\") pod \"packageserver-d55dfcdfc-hn2jw\" (UID: \"665d09a8-9764-4b5d-975e-5c96fd671dd5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.879563 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.887372 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twhs2\" (UniqueName: \"kubernetes.io/projected/8dd67d09-7a99-4971-a05e-1afc31b0afb8-kube-api-access-twhs2\") pod \"ingress-canary-nssds\" (UID: \"8dd67d09-7a99-4971-a05e-1afc31b0afb8\") " pod="openshift-ingress-canary/ingress-canary-nssds" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.891481 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/977b9e9a-e953-4ec8-bd40-382b18f806d1-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.915267 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw"] Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.917015 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtckq\" (UniqueName: \"kubernetes.io/projected/ddf9e91c-0239-4d06-af1e-9ef7d22e048a-kube-api-access-qtckq\") pod \"csi-hostpathplugin-nlrz5\" (UID: \"ddf9e91c-0239-4d06-af1e-9ef7d22e048a\") " pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.917071 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.917195 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.417168359 +0000 UTC m=+73.654196569 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.918449 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:25 crc kubenswrapper[4900]: E0127 12:27:25.920252 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.420220086 +0000 UTC m=+73.657248296 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.926259 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.939806 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.951885 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fq8k\" (UniqueName: \"kubernetes.io/projected/1a5d7b57-de98-445e-83b6-1ff0eb859e01-kube-api-access-8fq8k\") pod \"cni-sysctl-allowlist-ds-clhb8\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") " pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.963738 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.965846 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/176e188c-fe42-4703-8b29-2ad483a60231-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pk9tz\" (UID: \"176e188c-fe42-4703-8b29-2ad483a60231\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.990757 4900 util.go:30] "No sandbox for pod can be found. 
Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.990757 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s"
Jan 27 12:27:25 crc kubenswrapper[4900]: I0127 12:27:25.992272 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw"
Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.015453 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/977b9e9a-e953-4ec8-bd40-382b18f806d1-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-k6tpv\" (UID: \"977b9e9a-e953-4ec8-bd40-382b18f806d1\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv"
Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.019508 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36d27b4d-5f64-4436-8fd1-54243b90a439-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9zd6c\" (UID: \"36d27b4d-5f64-4436-8fd1-54243b90a439\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c"
Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.028358 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.028823 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.528783313 +0000 UTC m=+73.765811523 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.028902 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.029690 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.529683359 +0000 UTC m=+73.766711569 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.033964 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.056653 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm"] Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.070926 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.078491 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.087090 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.103778 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-75clq" Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.113208 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-nssds" Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.130304 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.130802 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.630776512 +0000 UTC m=+73.867804722 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.152752 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.201372 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd"] Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.213623 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-p297m"] Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.219436 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs"] Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.232099 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.232531 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.732517082 +0000 UTC m=+73.969545292 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.243312 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-7fkdm"] Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.249714 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" Jan 27 12:27:26 crc kubenswrapper[4900]: W0127 12:27:26.284610 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64e28912_899a_4d7f_8c6a_dffc3ca9f1b7.slice/crio-dbb829a0a5bc6dc8317562370b82028999df1f6fa77acac264e7b4a16cf5c734 WatchSource:0}: Error finding container dbb829a0a5bc6dc8317562370b82028999df1f6fa77acac264e7b4a16cf5c734: Status 404 returned error can't find the container with id dbb829a0a5bc6dc8317562370b82028999df1f6fa77acac264e7b4a16cf5c734 Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.306313 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.334029 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.334450 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.834432248 +0000 UTC m=+74.071460458 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.399002 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-599cn"] Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.437100 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.437896 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:26.937824237 +0000 UTC m=+74.174852447 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.447198 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz"] Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.523953 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth"] Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.524358 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq"] Jan 27 12:27:26 crc kubenswrapper[4900]: W0127 12:27:26.535162 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb5a985c_5b4c_4ab5_ab7c_61b356b88494.slice/crio-17a0a7795e09c87033304a9086007d7a4d3f330eb4eb96e4ab53b7aeec23bc5e WatchSource:0}: Error finding container 17a0a7795e09c87033304a9086007d7a4d3f330eb4eb96e4ab53b7aeec23bc5e: Status 404 returned error can't find the container with id 17a0a7795e09c87033304a9086007d7a4d3f330eb4eb96e4ab53b7aeec23bc5e Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.537879 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.538040 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.038018813 +0000 UTC m=+74.275047023 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.538201 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.538640 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.038619371 +0000 UTC m=+74.275647581 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.639428 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.639558 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.139533088 +0000 UTC m=+74.376561298 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.639849 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.640210 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.140196017 +0000 UTC m=+74.377224227 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.741951 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.742655 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.242630238 +0000 UTC m=+74.479658448 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.845241 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.845930 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.345913813 +0000 UTC m=+74.582942023 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.896134 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" event={"ID":"db5a985c-5b4c-4ab5-ab7c-61b356b88494","Type":"ContainerStarted","Data":"17a0a7795e09c87033304a9086007d7a4d3f330eb4eb96e4ab53b7aeec23bc5e"} Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.915429 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" event={"ID":"6561ed20-6634-4df7-891e-3c7f3e9427b0","Type":"ContainerStarted","Data":"c5185e40d96e35cafc28d473cb24a21dded983e3016eb03fa75b186619d27f88"} Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.947994 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.948435 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.448389426 +0000 UTC m=+74.685417656 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:26 crc kubenswrapper[4900]: I0127 12:27:26.949997 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:26 crc kubenswrapper[4900]: E0127 12:27:26.950568 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.450554548 +0000 UTC m=+74.687582758 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.010940 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-lld9r"] Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.014086 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz"] Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.018792 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw"] Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.034974 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" event={"ID":"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7","Type":"ContainerStarted","Data":"ec8972c0804fba0a33591ce437b3f7a98d1ed5e02d6da0ffe2448bbdecc5569c"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.035949 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-599cn" event={"ID":"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd","Type":"ContainerStarted","Data":"7380f280a08e14c6548f87ffc6aac29d12268e76c4b812c01d3f9f538e168fd4"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.039996 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" event={"ID":"398f26a6-4944-4c9b-926a-b4ef22eb2a1f","Type":"ContainerStarted","Data":"7bd54cfed730f053b6130b9279a670325e40b5c091d7e80533fae8c2d73d34e1"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.046771 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" 
event={"ID":"6ff9b4ed-de5c-4fb9-9eea-9bb84d2f11af","Type":"ContainerStarted","Data":"0620562637c665d9992ee856d5e924bfe4384840d178ce3b7c14885637cca4f9"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.053644 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.053737 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" event={"ID":"ef778856-8cbe-4b13-90f6-74bd28af2c86","Type":"ContainerStarted","Data":"b906d599063f4f1caccad1061151242f25127254d5cdaade47bbe7beca9b562e"} Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.053788 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.553767951 +0000 UTC m=+74.790796161 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.054911 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.055253 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.555239833 +0000 UTC m=+74.792268043 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.057638 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" event={"ID":"55206b88-053e-4616-bfb0-82f5d8a2d4f9","Type":"ContainerStarted","Data":"dd3ab14bdbdceb12f680980ec5321685b3f6e471070e1551b23cb380155c6958"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.058851 4900 csr.go:261] certificate signing request csr-glppf is approved, waiting to be issued Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.060692 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" event={"ID":"d9879637-6084-4966-9f47-2b99f22ea469","Type":"ContainerStarted","Data":"01e326f239c164824e2aa466d678a6d2ce8a2a297e0501cfc1e74bd5b40c35d1"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.070517 4900 csr.go:257] certificate signing request csr-glppf is issued Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.087800 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" event={"ID":"8b9b210a-0ed1-438e-af3b-14c5db643e15","Type":"ContainerStarted","Data":"1bbb516733b7ac299db54bebf664ff35d8fe8b0fa3f180445a23b6e6bd7a62e6"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.094865 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" event={"ID":"7d6c295a-54fe-47c5-b587-940db5e2589b","Type":"ContainerStarted","Data":"af54b8c37791866bc7ad4b0d4424d99399306f1f4de6853f544ff6eef308704b"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.112196 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" event={"ID":"f4d2ff4d-7d50-461f-8ea3-57fdd1be7214","Type":"ContainerStarted","Data":"6e09ddc21baf2627d3aa0d6174276bfea63cd03f915c0f10646549fe92870e63"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.114757 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" event={"ID":"08f894ed-4dd9-4e81-8051-4a1024f93a0b","Type":"ContainerStarted","Data":"a2802d0d1f26ac3247a1044a8bc893b59362579780d9055bb60836dfcb6734cb"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.115769 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" event={"ID":"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7","Type":"ContainerStarted","Data":"dbb829a0a5bc6dc8317562370b82028999df1f6fa77acac264e7b4a16cf5c734"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.120005 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" event={"ID":"557eda06-2394-4c8c-82c1-dbc08a122232","Type":"ContainerStarted","Data":"a102a0aff99d56097785a575811043a9c0bfd83595399c1ad2e442b6e9033699"} Jan 27 12:27:27 crc 
kubenswrapper[4900]: I0127 12:27:27.130195 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-zbp4l" event={"ID":"75d8fa11-eb06-4aae-8e96-3bb4328d69d7","Type":"ContainerStarted","Data":"4c2a78bc02f54661dc21c1c5a98c1a4ed695bc4feb4be9ce0990c10fee1a9331"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.131969 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-zbp4l" Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.140408 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.140544 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.148813 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-x96hr" event={"ID":"a3973a7c-f509-4769-93a6-4f71f99cc515","Type":"ContainerStarted","Data":"ad4e41043855c5923733005b35028fefb190fc027276ff815832eed672961723"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.156783 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.157793 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.657773957 +0000 UTC m=+74.894802167 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.196694 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" event={"ID":"8a55139c-7a3d-4800-b2d1-8cc5270d6eaa","Type":"ContainerStarted","Data":"13783553317f7d40fd11591f1fd1bf792fe09f2e9b3cf44e622ac8308fb10d68"} Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.260427 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.267484 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.767459927 +0000 UTC m=+75.004488137 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.348141 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" podStartSLOduration=36.348102902 podStartE2EDuration="36.348102902s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:27.324297469 +0000 UTC m=+74.561325679" watchObservedRunningTime="2026-01-27 12:27:27.348102902 +0000 UTC m=+74.585131112" Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.367844 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.369006 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.868983931 +0000 UTC m=+75.106012151 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.386978 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-nssds"] Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.388277 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.399574 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s"] Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.414483 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj"] Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.470911 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.471345 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:27.97132619 +0000 UTC m=+75.208354410 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.572379 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.572804 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.072783263 +0000 UTC m=+75.309811473 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.592663 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-lrt6s" podStartSLOduration=36.592628863 podStartE2EDuration="36.592628863s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:27.590264675 +0000 UTC m=+74.827292915" watchObservedRunningTime="2026-01-27 12:27:27.592628863 +0000 UTC m=+74.829657073"
Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.675634 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
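The "Observed pod startup duration" entries here and below record the kubelet's startup SLO metric. Since firstStartedPulling and lastFinishedPulling are the zero time (no image had to be pulled), podStartSLOduration and podStartE2EDuration coincide, and both equal the simple gap between podCreationTimestamp and watchObservedRunningTime. A quick check of that arithmetic for multus-additional-cni-plugins-lrt6s, using the timestamps from the entry above (ordinary Go time parsing, not kubelet code):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the pod_startup_latency_tracker entry above.
	created, err := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-01-27 12:26:51 +0000 UTC")
	if err != nil {
		panic(err)
	}
	running, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "2026-01-27 12:27:27.592628863 +0000 UTC")
	if err != nil {
		panic(err)
	}
	// Prints 36.592628863s, matching podStartSLOduration=36.592628863.
	fmt.Println(running.Sub(created))
}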
Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.676252 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.176234793 +0000 UTC m=+75.413263003 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.736753 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" podStartSLOduration=37.736695769 podStartE2EDuration="37.736695769s" podCreationTimestamp="2026-01-27 12:26:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:27.730504351 +0000 UTC m=+74.967532561" watchObservedRunningTime="2026-01-27 12:27:27.736695769 +0000 UTC m=+74.973723999"
Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.777772 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.778003 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.277964594 +0000 UTC m=+75.514992804 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.778315 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
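Each failed volume operation ends with "No retries permitted until <timestamp> (durationBeforeRetry 500ms)": nestedpendingoperations records the failure time and refuses to start the same operation again until the backoff window has passed, which is why the retries in this log land at roughly 500ms intervals. A simplified sketch of such a gate follows; the type and method names are invented, and the real implementation also grows the window on repeated failures, which this fixed-window sketch omits.

package main

import (
	"fmt"
	"time"
)

// retryGate is a toy version of a per-operation backoff gate: after an error
// it blocks the operation until lastErrorTime + durationBeforeRetry.
type retryGate struct {
	lastErrorTime       time.Time
	durationBeforeRetry time.Duration
}

func (g *retryGate) recordError(now time.Time) {
	if g.durationBeforeRetry == 0 {
		g.durationBeforeRetry = 500 * time.Millisecond // initial window, as in the log
	}
	g.lastErrorTime = now
}

// allowed reports whether a retry may start now, and if not, when it may.
func (g *retryGate) allowed(now time.Time) (bool, time.Time) {
	retryAt := g.lastErrorTime.Add(g.durationBeforeRetry)
	return !now.Before(retryAt), retryAt
}

func main() {
	var g retryGate
	t0 := time.Date(2026, 1, 27, 12, 27, 27, 676252000, time.UTC)
	g.recordError(t0)
	// 100ms later the reconciler asks again and is told to wait:
	ok, at := g.allowed(t0.Add(100 * time.Millisecond))
	fmt.Printf("retry allowed=%v; no retries permitted until %s (durationBeforeRetry %s)\n",
		ok, at.Format(time.RFC3339Nano), g.durationBeforeRetry)
}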
Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.778714 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.278706785 +0000 UTC m=+75.515734995 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.791843 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" podStartSLOduration=37.791808971 podStartE2EDuration="37.791808971s" podCreationTimestamp="2026-01-27 12:26:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:27.787971771 +0000 UTC m=+75.024999971" watchObservedRunningTime="2026-01-27 12:27:27.791808971 +0000 UTC m=+75.028837181"
Jan 27 12:27:27 crc kubenswrapper[4900]: W0127 12:27:27.862514 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd0dd2d2_a1be_492e_a685_eca20df9dca4.slice/crio-d1bde16332ce13773abaa4f037d76f65569f2a983c77cd5f9d22845e4350e308 WatchSource:0}: Error finding container d1bde16332ce13773abaa4f037d76f65569f2a983c77cd5f9d22845e4350e308: Status 404 returned error can't find the container with id d1bde16332ce13773abaa4f037d76f65569f2a983c77cd5f9d22845e4350e308
Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.887612 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.888008 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.387988353 +0000 UTC m=+75.625016563 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.901950 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv"] Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.947838 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-zbp4l" podStartSLOduration=36.94780344 podStartE2EDuration="36.94780344s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:27.941631173 +0000 UTC m=+75.178659383" watchObservedRunningTime="2026-01-27 12:27:27.94780344 +0000 UTC m=+75.184831650" Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.960376 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c"] Jan 27 12:27:27 crc kubenswrapper[4900]: I0127 12:27:27.989986 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:27 crc kubenswrapper[4900]: E0127 12:27:27.990421 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.490405693 +0000 UTC m=+75.727433903 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.238830 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-27 12:22:27 +0000 UTC, rotation deadline is 2026-11-27 04:57:41.148348862 +0000 UTC
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.239247 4900 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7288h30m12.909106424s for next certificate rotation
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.240949 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:28 crc kubenswrapper[4900]: E0127 12:27:28.241584 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.741536704 +0000 UTC m=+75.978564914 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.318915 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" event={"ID":"08f894ed-4dd9-4e81-8051-4a1024f93a0b","Type":"ContainerStarted","Data":"ac212b0a271fa7601264a45a79fb1cba7f67773274f809c7b4d6760f6b541b72"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.343551 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
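The certificate_manager lines above show the serving certificate valid until 2027-01-27 and a rotation deadline of 2026-11-27, roughly 83% of the way through the one-year validity window, after which the kubelet simply sleeps (the 7288h30m wait) until that deadline. That is consistent with the client-go certificate manager's habit of jittering the deadline into about the 70-90% band of the certificate lifetime. The sketch below reproduces that arithmetic under that assumption; rotationDeadline is an illustrative helper, not the library function.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a jittered point 70-90% of the way through the
// certificate's validity, mirroring (by assumption) the behaviour behind the
// "rotation deadline" log line.
func rotationDeadline(notBefore, notAfter time.Time, r *rand.Rand) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration((0.7 + 0.2*r.Float64()) * float64(total))
	return notBefore.Add(jittered)
}

func main() {
	// Validity window implied by the log: issued around first start,
	// expiring 2027-01-27 12:22:27 +0000 UTC.
	notBefore := time.Date(2026, 1, 27, 12, 22, 27, 0, time.UTC)
	notAfter := time.Date(2027, 1, 27, 12, 22, 27, 0, time.UTC)
	now := time.Date(2026, 1, 27, 12, 27, 28, 239247000, time.UTC)

	deadline := rotationDeadline(notBefore, notAfter, rand.New(rand.NewSource(42)))
	fmt.Printf("sampled rotation deadline: %s\n", deadline)

	// Plugging in the deadline actually logged (2026-11-27 04:57:41.148348862)
	// gives a wait of ~7288h30m12.9s, matching the "Waiting ..." entry.
	logged := time.Date(2026, 11, 27, 4, 57, 41, 148348862, time.UTC)
	fmt.Printf("waiting %s for next certificate rotation\n", logged.Sub(now))
}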
Jan 27 12:27:28 crc kubenswrapper[4900]: E0127 12:27:28.346589 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.846560939 +0000 UTC m=+76.083589159 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.367945 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9"]
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.369670 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-nssds" event={"ID":"8dd67d09-7a99-4971-a05e-1afc31b0afb8","Type":"ContainerStarted","Data":"5de8ea84b475858658febf787a32a708f72f1b1f4a11d1b8fad3e0b0b33c7773"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.372108 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nlrz5"]
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.401753 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" event={"ID":"665d09a8-9764-4b5d-975e-5c96fd671dd5","Type":"ContainerStarted","Data":"ac239ce6022b80bb2913da913ad412c5f983f48eb7be9022f80a5df28a01bf92"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.412337 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" event={"ID":"511728ec-f571-4333-b0e2-c6a897c6c2d5","Type":"ContainerStarted","Data":"89171c080deb58310e5e90db3444414f383e9719e25ac38dbe9c20871bc390c3"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.447146 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:28 crc kubenswrapper[4900]: E0127 12:27:28.447678 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:28.947657021 +0000 UTC m=+76.184685231 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.455267 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" event={"ID":"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7","Type":"ContainerStarted","Data":"ce6f423aa79fde3fd4e418682bb1333c11b38ac3a8226d83ea521a442e7b26bc"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.466830 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" event={"ID":"398f26a6-4944-4c9b-926a-b4ef22eb2a1f","Type":"ContainerStarted","Data":"defe4930cbdb8606dcabcddc515864def198ecf1db8f16ae3ca2fa48dee88898"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.477581 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zvswh" event={"ID":"0703acf1-af71-4249-b1ef-e19c6beb4d86","Type":"ContainerStarted","Data":"8563ab30e2521d59b7bc253b46819ad4ef121d681d3c7cebdd6efe4151e7b324"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.511830 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" event={"ID":"9a6a8e52-38d8-41a6-863f-78255609c063","Type":"ContainerStarted","Data":"2625ce8b4dc3e857cfaf81327d9714c525a546e308d464898404e2ae64a6b5f3"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.514087 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" event={"ID":"9080a9b0-9613-4077-bbbc-6ff558b4180c","Type":"ContainerStarted","Data":"0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2"}
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.516889 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.519773 4900 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-52vsm container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body=
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.519838 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" podUID="9080a9b0-9613-4077-bbbc-6ff558b4180c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused"
Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.527432 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-x96hr" event={"ID":"a3973a7c-f509-4769-93a6-4f71f99cc515","Type":"ContainerStarted","Data":"fa508b65fae983115f92df6ad95ea7baccc775be7decb3684cde09262e3926c3"}
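The readiness probe failures above and below are ordinary startup churn rather than a fault: the kubelet begins probing as soon as PLEG reports ContainerStarted, and until the process inside the container binds its port the TCP dial is refused, so the attempt is recorded as probeResult="failure" and the pod simply stays un-Ready until the next probe period. A minimal sketch of an HTTP readiness check with those semantics follows; probeReadiness is a hypothetical helper, it uses a plain HTTP GET, and the probes in this log actually hit HTTPS endpoints.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeReadiness performs a one-shot HTTP readiness check: a transport error
// such as "connect: connection refused" (server not listening yet) counts as
// a failure, as does any status outside the 2xx-3xx range.
func probeReadiness(url string) (string, error) {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return "failure", err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return "success", nil
	}
	return "failure", fmt.Errorf("unexpected status %d", resp.StatusCode)
}

func main() {
	// 10.217.0.22:8443 is route-controller-manager's endpoint in the log;
	// immediately after ContainerStarted nothing is listening there yet, so
	// the dial fails and the result mirrors the "Probe failed" entries.
	result, err := probeReadiness("http://10.217.0.22:8443/healthz")
	fmt.Println(result, err)
}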
kubenswrapper[4900]: I0127 12:27:28.528681 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.534998 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.535074 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.548752 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:28 crc kubenswrapper[4900]: E0127 12:27:28.549318 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.04929755 +0000 UTC m=+76.286325770 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.555358 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" event={"ID":"e4c1d903-be2e-4216-bc7a-e17c23bd6e63","Type":"ContainerStarted","Data":"f01831d5c359e4a163bdcaea4a90306fd210de6ac74a9d8bd0ec9c34793e8bf1"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.558010 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-5zbpj" podStartSLOduration=37.557967609 podStartE2EDuration="37.557967609s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:28.555735735 +0000 UTC m=+75.792763945" watchObservedRunningTime="2026-01-27 12:27:28.557967609 +0000 UTC m=+75.794995819" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.563121 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" event={"ID":"0c526262-6ee4-4526-91d7-614b3cd91082","Type":"ContainerStarted","Data":"a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.566005 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.573543 4900 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8kcp8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" start-of-body= Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.573630 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.575535 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" event={"ID":"977b9e9a-e953-4ec8-bd40-382b18f806d1","Type":"ContainerStarted","Data":"82f7b54897283e732d3759ee3707bc873998d16870efb39b4ae955662fefce06"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.594511 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" event={"ID":"176e188c-fe42-4703-8b29-2ad483a60231","Type":"ContainerStarted","Data":"b26e0e2f0671452e3a1c0c750bcc954db2d3dacfa9a694c654c91152f6ce6124"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.604649 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-rfgbs" podStartSLOduration=37.604625499 podStartE2EDuration="37.604625499s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:28.602618651 +0000 UTC m=+75.839646861" watchObservedRunningTime="2026-01-27 12:27:28.604625499 +0000 UTC m=+75.841653709" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.629826 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" event={"ID":"ef778856-8cbe-4b13-90f6-74bd28af2c86","Type":"ContainerStarted","Data":"21a1b90dde174b8a07e1e0e5e75c33e6bd63083a62b2d26b714f93b1d13d5100"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.671153 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-tc5cb" podStartSLOduration=38.671130488 podStartE2EDuration="38.671130488s" podCreationTimestamp="2026-01-27 12:26:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:28.642784034 +0000 UTC m=+75.879812264" watchObservedRunningTime="2026-01-27 12:27:28.671130488 +0000 UTC m=+75.908158698" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.696003 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" podStartSLOduration=37.695976612 podStartE2EDuration="37.695976612s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-27 12:27:28.673158566 +0000 UTC m=+75.910186776" watchObservedRunningTime="2026-01-27 12:27:28.695976612 +0000 UTC m=+75.933004822" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.701006 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:28 crc kubenswrapper[4900]: E0127 12:27:28.706443 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.206410691 +0000 UTC m=+76.443439081 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.708458 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" event={"ID":"557eda06-2394-4c8c-82c1-dbc08a122232","Type":"ContainerStarted","Data":"e3956d0752a1fc80f9bcb3ff543bc9ad01d22f44558c2648d494069703fee044"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.746968 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" event={"ID":"ccc38bcb-5db5-4b5c-8dd2-357b2cfc33c7","Type":"ContainerStarted","Data":"33dec3fff0b6cc0838836426230d47314fbb97aed6f44abaa8a31e164e3ef6c1"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.755423 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" event={"ID":"8b9b210a-0ed1-438e-af3b-14c5db643e15","Type":"ContainerStarted","Data":"5d3a11d7d1fde23607c1ec8a73f43bae9f70b889212e1763bf64094ec1410c04"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.815152 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:28 crc kubenswrapper[4900]: E0127 12:27:28.815590 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.315570705 +0000 UTC m=+76.552598915 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.815725 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s" event={"ID":"bd0dd2d2-a1be-492e-a685-eca20df9dca4","Type":"ContainerStarted","Data":"d1bde16332ce13773abaa4f037d76f65569f2a983c77cd5f9d22845e4350e308"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.836155 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podStartSLOduration=37.836119635 podStartE2EDuration="37.836119635s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:28.700744748 +0000 UTC m=+75.937772958" watchObservedRunningTime="2026-01-27 12:27:28.836119635 +0000 UTC m=+76.073147845" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.848278 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.861157 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.861218 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.880633 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" event={"ID":"f4d2ff4d-7d50-461f-8ea3-57fdd1be7214","Type":"ContainerStarted","Data":"d55bf21b99821dd8a7943a8ecdad9205c95a93a679d22075a8b6cace1a6ca324"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.887635 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" podStartSLOduration=37.887608424 podStartE2EDuration="37.887608424s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:28.848085679 +0000 UTC m=+76.085113899" watchObservedRunningTime="2026-01-27 12:27:28.887608424 +0000 UTC m=+76.124636654" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.903297 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" event={"ID":"4a0de235-4f39-4196-93a2-a4e7e1637c19","Type":"ContainerStarted","Data":"9186943134d99e6d629b3bb09c731f9c89c73c18dc3ef0f948fa9b6126e78ac3"} Jan 27 
12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.923397 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:28 crc kubenswrapper[4900]: E0127 12:27:28.924681 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.424664628 +0000 UTC m=+76.661692838 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.929001 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" event={"ID":"419258b7-ee70-4a6a-afdb-e35cd0570fc2","Type":"ContainerStarted","Data":"ce88a7a6d6ff410819b12f8da23d496cb8ab33b9bf146727e7f8d655c98c461c"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.949078 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-zvswh" podStartSLOduration=37.949029227 podStartE2EDuration="37.949029227s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:28.944121416 +0000 UTC m=+76.181149636" watchObservedRunningTime="2026-01-27 12:27:28.949029227 +0000 UTC m=+76.186057437" Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.968983 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-75clq" event={"ID":"a271161a-8adc-4b11-a132-03c70bed453e","Type":"ContainerStarted","Data":"ecd6a1ee147a10a0d465817592d86ce4bb6de06910159f6fe8a0b843e7626cac"} Jan 27 12:27:28 crc kubenswrapper[4900]: I0127 12:27:28.988087 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" event={"ID":"3f735b40-5dd4-4411-962e-c948bfc18518","Type":"ContainerStarted","Data":"26953e93e9acf3b0278cf40e90f4ddce1d0d420d446770b1479967504a36404f"} Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.007959 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" event={"ID":"1a5d7b57-de98-445e-83b6-1ff0eb859e01","Type":"ContainerStarted","Data":"028e3f1d56981b93660996246759f07b7acb59134e8d4a7fc20c36dd7b477781"} Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.010168 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:27:29 crc 
kubenswrapper[4900]: I0127 12:27:29.010231 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.034977 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vht44" podStartSLOduration=38.034952804 podStartE2EDuration="38.034952804s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:29.004737256 +0000 UTC m=+76.241765467" watchObservedRunningTime="2026-01-27 12:27:29.034952804 +0000 UTC m=+76.271981014" Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.040975 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.042701 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.542678596 +0000 UTC m=+76.779706816 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.086545 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-fw6lf" podStartSLOduration=38.086525215 podStartE2EDuration="38.086525215s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:29.050575303 +0000 UTC m=+76.287603533" watchObservedRunningTime="2026-01-27 12:27:29.086525215 +0000 UTC m=+76.323553425" Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.086669 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nnzkd" podStartSLOduration=38.086666069 podStartE2EDuration="38.086666069s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:29.085038192 +0000 UTC m=+76.322066402" watchObservedRunningTime="2026-01-27 12:27:29.086666069 +0000 UTC m=+76.323694279" Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.127555 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-st975" podStartSLOduration=40.127527512 podStartE2EDuration="40.127527512s" podCreationTimestamp="2026-01-27 12:26:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:29.125672709 +0000 UTC m=+76.362700929" watchObservedRunningTime="2026-01-27 12:27:29.127527512 +0000 UTC m=+76.364555722" Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.163266 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.165294 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.665266416 +0000 UTC m=+76.902294626 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.206567 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-75clq" podStartSLOduration=7.205974194 podStartE2EDuration="7.205974194s" podCreationTimestamp="2026-01-27 12:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:29.196311597 +0000 UTC m=+76.433339807" watchObservedRunningTime="2026-01-27 12:27:29.205974194 +0000 UTC m=+76.443002424" Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.265093 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht" podStartSLOduration=38.26504328 podStartE2EDuration="38.26504328s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:29.237291913 +0000 UTC m=+76.474320123" watchObservedRunningTime="2026-01-27 12:27:29.26504328 +0000 UTC m=+76.502071480" Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.265621 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.266189 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.766162932 +0000 UTC m=+77.003191142 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.367984 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.368326 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.868288205 +0000 UTC m=+77.105316415 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.368680 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.369124 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.869115188 +0000 UTC m=+77.106143398 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.473375 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.473566 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.973527986 +0000 UTC m=+77.210556196 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.473906 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.474494 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:29.974471793 +0000 UTC m=+77.211500003 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.574810 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.575345 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.075324819 +0000 UTC m=+77.312353029 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.683506 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.683864 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.183850575 +0000 UTC m=+77.420878785 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.806873 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.807443 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.307407291 +0000 UTC m=+77.544435501 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.890602 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.891118 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 12:27:29 crc kubenswrapper[4900]: I0127 12:27:29.913016 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:29 crc kubenswrapper[4900]: E0127 12:27:29.913804 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.413787396 +0000 UTC m=+77.650815606 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.014245 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.014535 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.514477987 +0000 UTC m=+77.751506197 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.014974 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.015489 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.515475835 +0000 UTC m=+77.752504035 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.045972 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" event={"ID":"1a5d7b57-de98-445e-83b6-1ff0eb859e01","Type":"ContainerStarted","Data":"29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.049256 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" event={"ID":"ea673ed9-5530-4b5d-8997-403f903d27a6","Type":"ContainerStarted","Data":"640d5ad067ae99ae44c0d01c68c5fe1ce407c89a12675f519764cbcb91097de9"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.049306 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" event={"ID":"ea673ed9-5530-4b5d-8997-403f903d27a6","Type":"ContainerStarted","Data":"3f33329316be21f2d3e524bd5d9a494cca94438625a223a05e80175877036d27"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.081100 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" event={"ID":"08f894ed-4dd9-4e81-8051-4a1024f93a0b","Type":"ContainerStarted","Data":"149a7496634942d6e458ef846703b766dc63e4342df324442c3eaa034e638f76"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.088629 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-nssds" event={"ID":"8dd67d09-7a99-4971-a05e-1afc31b0afb8","Type":"ContainerStarted","Data":"1dc614ad977d853203c61ba5cec5cd8074f41095703ce2133da674ccf55081b3"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.094663 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" event={"ID":"3f735b40-5dd4-4411-962e-c948bfc18518","Type":"ContainerStarted","Data":"cf7b26c28ec96d93904b77ef651dd86c71de5538650b4ccb6d93ae6b60670dbf"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.115097 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" event={"ID":"8b9b210a-0ed1-438e-af3b-14c5db643e15","Type":"ContainerStarted","Data":"a6660c02a22421e714fc1dee3d8bd0f0cf6aba39ef260cb926d118f2203b39d9"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.115680 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.116926 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-27 12:27:30.616903317 +0000 UTC m=+77.853931517 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.118611 4900 generic.go:334] "Generic (PLEG): container finished" podID="ef778856-8cbe-4b13-90f6-74bd28af2c86" containerID="21a1b90dde174b8a07e1e0e5e75c33e6bd63083a62b2d26b714f93b1d13d5100" exitCode=0 Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.118671 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" event={"ID":"ef778856-8cbe-4b13-90f6-74bd28af2c86","Type":"ContainerDied","Data":"21a1b90dde174b8a07e1e0e5e75c33e6bd63083a62b2d26b714f93b1d13d5100"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.123767 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" event={"ID":"511728ec-f571-4333-b0e2-c6a897c6c2d5","Type":"ContainerStarted","Data":"7b2aa3b76f3176ac03f8db768ef097edab2d68e42b9bf6f2b4efdf26a5bc96e4"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.127517 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-nssds" podStartSLOduration=8.127496892 podStartE2EDuration="8.127496892s" podCreationTimestamp="2026-01-27 12:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.126510413 +0000 UTC m=+77.363538623" watchObservedRunningTime="2026-01-27 12:27:30.127496892 +0000 UTC m=+77.364525102" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.133105 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" event={"ID":"db5a985c-5b4c-4ab5-ab7c-61b356b88494","Type":"ContainerStarted","Data":"5730e264550758c5119d577ae68abd49b3dde52cb26dd2aa4dd6d0f63f9bb462"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.141598 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" event={"ID":"ddf9e91c-0239-4d06-af1e-9ef7d22e048a","Type":"ContainerStarted","Data":"746bffb9294b553187abbd54f34393c09ecb3879942d33d2268db755df49df5f"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.154438 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" event={"ID":"64e28912-899a-4d7f-8c6a-dffc3ca9f1b7","Type":"ContainerStarted","Data":"79f9e7fbe051411bb4eb8483e909ed7ee2c27a6acdaa3847180602eb125945c3"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.157028 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" event={"ID":"6561ed20-6634-4df7-891e-3c7f3e9427b0","Type":"ContainerStarted","Data":"bccd5205914ac50233a5d2421f3fbe340c18e3c3c8c5ce6272c51c0ded00824f"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.176052 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" event={"ID":"9a6a8e52-38d8-41a6-863f-78255609c063","Type":"ContainerStarted","Data":"af8b4577d6aed3196e9a4ed07893dfe9678cfa0e73342e6e1f76f17118c3691a"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.176108 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.185347 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-z5jcj" podStartSLOduration=38.185323642 podStartE2EDuration="38.185323642s" podCreationTimestamp="2026-01-27 12:26:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.180989808 +0000 UTC m=+77.418018018" watchObservedRunningTime="2026-01-27 12:27:30.185323642 +0000 UTC m=+77.422351852" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.191598 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.191669 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.194528 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" event={"ID":"4a0de235-4f39-4196-93a2-a4e7e1637c19","Type":"ContainerStarted","Data":"f016bb763266a5b31814763b3306467950b8bdcd788cb519faeb23bb1e668457"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.200281 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" event={"ID":"d9879637-6084-4966-9f47-2b99f22ea469","Type":"ContainerStarted","Data":"67b26f0cacd427c71913b12c5ef3ccd49ee52b6184f7242146c19ab65ba073e6"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.218651 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.220322 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.720308746 +0000 UTC m=+77.957336956 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.220686 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" event={"ID":"e4c1d903-be2e-4216-bc7a-e17c23bd6e63","Type":"ContainerStarted","Data":"4a14d5b2b00e00204ce21fd017f63973834526a0996828f6b3f4f2f85677dfeb"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.232459 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-75clq" event={"ID":"a271161a-8adc-4b11-a132-03c70bed453e","Type":"ContainerStarted","Data":"6ac5afc4b60e8ba607600b19fe1cfc75684ab152ddbd9e45177acfcda541db2f"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.259179 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s" event={"ID":"bd0dd2d2-a1be-492e-a685-eca20df9dca4","Type":"ContainerStarted","Data":"ad289773bb3979e8f7afa7f0e9ac59da83c4db01d0820a7ec5d074d2f8bfe081"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.279597 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" event={"ID":"176e188c-fe42-4703-8b29-2ad483a60231","Type":"ContainerStarted","Data":"a6003cb3ddd16b49536d245128cfae5414290790917d6cb18c8116e523bf4147"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.282367 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" event={"ID":"36d27b4d-5f64-4436-8fd1-54243b90a439","Type":"ContainerStarted","Data":"613e38fae672b5a276e90a3dbc51ae7e2286f16b6e6ee302468b6be3699abc1d"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.282416 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" event={"ID":"36d27b4d-5f64-4436-8fd1-54243b90a439","Type":"ContainerStarted","Data":"44bebc1e8882cb27d3dc5370ee72933e87fdb96c9695bfb68aace86884a382a8"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.292737 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" event={"ID":"55206b88-053e-4616-bfb0-82f5d8a2d4f9","Type":"ContainerStarted","Data":"31f83169e5251d90bc357c659f304b3cee9b921c07a510166beee489c5d77aee"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.304241 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-599cn" event={"ID":"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd","Type":"ContainerStarted","Data":"cf2d67fdd3efecd9f5e0b3149cf57f43889ae9c8cf3ada7a90a37c62cd1004d0"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.313547 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" event={"ID":"665d09a8-9764-4b5d-975e-5c96fd671dd5","Type":"ContainerStarted","Data":"aea49d80580735b7c3077d2ecd2a58286854904b62c4438bcc939e0966a00158"} Jan 27 12:27:30 crc 
kubenswrapper[4900]: I0127 12:27:30.314411 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.322136 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" event={"ID":"419258b7-ee70-4a6a-afdb-e35cd0570fc2","Type":"ContainerStarted","Data":"d88a91e89170c7bac395903808a1d0d7ac0320745a5341ea24ff98412b2ef469"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.323024 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" start-of-body= Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.323069 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.323602 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.323932 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.823916001 +0000 UTC m=+78.060944211 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.329400 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" event={"ID":"977b9e9a-e953-4ec8-bd40-382b18f806d1","Type":"ContainerStarted","Data":"803477faf7b7c38661b5844222ebd5cc4065dcea5519bd21e5553f6c024b3e7f"} Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.331242 4900 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8kcp8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" start-of-body= Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.331294 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.331554 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.331730 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.332379 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.332406 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.333167 4900 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-52vsm container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.333197 4900 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" podUID="9080a9b0-9613-4077-bbbc-6ff558b4180c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.333298 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.334296 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" podStartSLOduration=40.334278549 podStartE2EDuration="40.334278549s" podCreationTimestamp="2026-01-27 12:26:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.292110188 +0000 UTC m=+77.529138398" watchObservedRunningTime="2026-01-27 12:27:30.334278549 +0000 UTC m=+77.571306769" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.337603 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.337680 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.358687 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-lld9r" podStartSLOduration=38.358644268 podStartE2EDuration="38.358644268s" podCreationTimestamp="2026-01-27 12:26:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.356695892 +0000 UTC m=+77.593724102" watchObservedRunningTime="2026-01-27 12:27:30.358644268 +0000 UTC m=+77.595672478" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.359146 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podStartSLOduration=39.359138402 podStartE2EDuration="39.359138402s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.337733658 +0000 UTC m=+77.574761878" watchObservedRunningTime="2026-01-27 12:27:30.359138402 +0000 UTC m=+77.596166632" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.380249 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pk9tz" podStartSLOduration=39.380227478 podStartE2EDuration="39.380227478s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.379566709 +0000 UTC 
m=+77.616594929" watchObservedRunningTime="2026-01-27 12:27:30.380227478 +0000 UTC m=+77.617255688" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.425304 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.427296 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:30.927273579 +0000 UTC m=+78.164301889 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.480154 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-wrmdd" podStartSLOduration=39.480129896 podStartE2EDuration="39.480129896s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.402161098 +0000 UTC m=+77.639189308" watchObservedRunningTime="2026-01-27 12:27:30.480129896 +0000 UTC m=+77.717158106" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.484123 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xrbcq" podStartSLOduration=39.48410733 podStartE2EDuration="39.48410733s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.453600944 +0000 UTC m=+77.690629164" watchObservedRunningTime="2026-01-27 12:27:30.48410733 +0000 UTC m=+77.721135540" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.517105 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podStartSLOduration=39.517077347 podStartE2EDuration="39.517077347s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.516167411 +0000 UTC m=+77.753195621" watchObservedRunningTime="2026-01-27 12:27:30.517077347 +0000 UTC m=+77.754105557" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.526898 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.527837 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.027817965 +0000 UTC m=+78.264846175 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.629063 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.629791 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.129757032 +0000 UTC m=+78.366785242 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.635189 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podStartSLOduration=39.635165577 podStartE2EDuration="39.635165577s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:30.613472055 +0000 UTC m=+77.850500285" watchObservedRunningTime="2026-01-27 12:27:30.635165577 +0000 UTC m=+77.872193787" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.731358 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.731788 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.231770351 +0000 UTC m=+78.468798561 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.833271 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.833947 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.333916104 +0000 UTC m=+78.570944314 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.840437 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.840587 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.934375 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.934546 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.434523863 +0000 UTC m=+78.671552073 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:30 crc kubenswrapper[4900]: I0127 12:27:30.934649 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:30 crc kubenswrapper[4900]: E0127 12:27:30.934956 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.434945415 +0000 UTC m=+78.671973625 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.035861 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.036154 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.536105449 +0000 UTC m=+78.773133659 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.036294 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.036830 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.53681899 +0000 UTC m=+78.773847200 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.138106 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.138284 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.638243082 +0000 UTC m=+78.875271312 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.138809 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.139214 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.639203359 +0000 UTC m=+78.876231569 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.243298 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.243605 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.743537655 +0000 UTC m=+78.980565875 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.243923 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.244521 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.744511833 +0000 UTC m=+78.981540043 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.348243 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" event={"ID":"6561ed20-6634-4df7-891e-3c7f3e9427b0","Type":"ContainerStarted","Data":"40f83160743243006a133b38f16efcaba82219bfb943e6c2a513697bb9387720"} Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.359076 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.360228 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.860184274 +0000 UTC m=+79.097212494 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.360642 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.361445 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.86143423 +0000 UTC m=+79.098462440 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.373535 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-599cn" event={"ID":"87d55f2a-17c3-4b9e-a64d-4470e9f9a6dd","Type":"ContainerStarted","Data":"cac4d964edef4d105dbbc90f1eac8a81e2815bf89f508856d40a5b736e8f4661"} Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.374639 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-599cn" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.399318 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" event={"ID":"d9879637-6084-4966-9f47-2b99f22ea469","Type":"ContainerStarted","Data":"c05ada28de8965d83744a14ccc4f788970a74f0685e43d01a750af6066acfd47"} Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.410399 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" event={"ID":"db5a985c-5b4c-4ab5-ab7c-61b356b88494","Type":"ContainerStarted","Data":"e73a94274cd9fbb6f43dc43bf3223ce9f882744fab8219c65167f271a208cd68"} Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.411302 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.413291 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s" event={"ID":"bd0dd2d2-a1be-492e-a685-eca20df9dca4","Type":"ContainerStarted","Data":"eeee6b00160544ade8798a1f0def2804234f6462674479023c67c8e41cf74d2b"} Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.421562 4900 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" event={"ID":"ef778856-8cbe-4b13-90f6-74bd28af2c86","Type":"ContainerStarted","Data":"1fec2bc0c774fa95706f745a2bdb629af68463e53741db9d7fd4e6dfed995f04"} Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.421854 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.422191 4900 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8kcp8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" start-of-body= Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.422237 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.428401 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429176 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" start-of-body= Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429237 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429311 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429329 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429087 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429362 4900 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429140 4900 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-52vsm container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429382 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" podUID="9080a9b0-9613-4077-bbbc-6ff558b4180c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.429306 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.462781 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.464360 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.964319314 +0000 UTC m=+79.201347524 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.464991 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.471240 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:31.971213152 +0000 UTC m=+79.208241362 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.572884 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.575078 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:32.075054713 +0000 UTC m=+79.312082923 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.860424 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.861001 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:32.360973852 +0000 UTC m=+79.598002062 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.861579 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:31 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:31 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:31 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.861659 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:31 crc kubenswrapper[4900]: I0127 12:27:31.961775 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:31 crc kubenswrapper[4900]: E0127 12:27:31.962249 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:32.462221209 +0000 UTC m=+79.699249419 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.063547 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.064179 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:32.564161066 +0000 UTC m=+79.801189276 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.124922 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-7fkdm" podStartSLOduration=41.12488497 podStartE2EDuration="41.12488497s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:31.88981982 +0000 UTC m=+79.126848020" watchObservedRunningTime="2026-01-27 12:27:32.12488497 +0000 UTC m=+79.361913180" Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.164539 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.165448 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:32.665399673 +0000 UTC m=+79.902427883 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.178353 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" podStartSLOduration=41.178320024 podStartE2EDuration="41.178320024s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.127222007 +0000 UTC m=+79.364250217" watchObservedRunningTime="2026-01-27 12:27:32.178320024 +0000 UTC m=+79.415348234" Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.179280 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2g4jf" podStartSLOduration=42.179270901 podStartE2EDuration="42.179270901s" podCreationTimestamp="2026-01-27 12:26:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.177626064 +0000 UTC m=+79.414654274" watchObservedRunningTime="2026-01-27 12:27:32.179270901 +0000 UTC m=+79.416299111" Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.266183 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.266627 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:32.766610549 +0000 UTC m=+80.003638759 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.283603 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-rwdmj" podStartSLOduration=41.283555825 podStartE2EDuration="41.283555825s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.281582018 +0000 UTC m=+79.518610228" watchObservedRunningTime="2026-01-27 12:27:32.283555825 +0000 UTC m=+79.520584035" Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.285525 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-599cn" podStartSLOduration=10.285512401 podStartE2EDuration="10.285512401s" podCreationTimestamp="2026-01-27 12:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.247727426 +0000 UTC m=+79.484755636" watchObservedRunningTime="2026-01-27 12:27:32.285512401 +0000 UTC m=+79.522540611" Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.457188 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.457365 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:32.957342835 +0000 UTC m=+80.194371045 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.457599 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.457996 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-27 12:27:32.957988383 +0000 UTC m=+80.195016593 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.509774 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9zd6c" podStartSLOduration=41.509744049 podStartE2EDuration="41.509744049s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.506601599 +0000 UTC m=+79.743629809" watchObservedRunningTime="2026-01-27 12:27:32.509744049 +0000 UTC m=+79.746774119" Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.544384 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.562741 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.563893 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.063860263 +0000 UTC m=+80.300888473 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.564440 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.565728 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.065702966 +0000 UTC m=+80.302731176 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.665730 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.165706907 +0000 UTC m=+80.402735117 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.665595 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.666135 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.666497 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.166487179 +0000 UTC m=+80.403515389 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.697784 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-gs2tm" podStartSLOduration=41.697753967 podStartE2EDuration="41.697753967s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.572926513 +0000 UTC m=+79.809954723" watchObservedRunningTime="2026-01-27 12:27:32.697753967 +0000 UTC m=+79.934782177"
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.767191 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.767324 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.267302824 +0000 UTC m=+80.504331034 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.767607 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.767937 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.267928072 +0000 UTC m=+80.504956282 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.814742 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p297m" podStartSLOduration=41.814398966 podStartE2EDuration="41.814398966s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.813889422 +0000 UTC m=+80.050917652" watchObservedRunningTime="2026-01-27 12:27:32.814398966 +0000 UTC m=+80.051427176"
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.817502 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" podStartSLOduration=41.817485105 podStartE2EDuration="41.817485105s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.699710903 +0000 UTC m=+79.936739113" watchObservedRunningTime="2026-01-27 12:27:32.817485105 +0000 UTC m=+80.054513325"
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.848147 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 27 12:27:32 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld
Jan 27 12:27:32 crc kubenswrapper[4900]: [+]process-running ok
Jan 27 12:27:32 crc kubenswrapper[4900]: healthz check failed
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.848233 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.873921 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.874452 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.37443052 +0000 UTC m=+80.611458740 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.892368 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh"
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.894166 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" podStartSLOduration=41.894140776 podStartE2EDuration="41.894140776s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.893415865 +0000 UTC m=+80.130444095" watchObservedRunningTime="2026-01-27 12:27:32.894140776 +0000 UTC m=+80.131168986"
Jan 27 12:27:32 crc kubenswrapper[4900]: I0127 12:27:32.984212 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:32 crc kubenswrapper[4900]: E0127 12:27:32.985311 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.485287173 +0000 UTC m=+80.722315453 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.005693 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4rxs" podStartSLOduration=42.005662638 podStartE2EDuration="42.005662638s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:32.954703185 +0000 UTC m=+80.191731405" watchObservedRunningTime="2026-01-27 12:27:33.005662638 +0000 UTC m=+80.242690848"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.007841 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-pdc9s" podStartSLOduration=42.00783228 podStartE2EDuration="42.00783228s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:33.00539054 +0000 UTC m=+80.242418750" watchObservedRunningTime="2026-01-27 12:27:33.00783228 +0000 UTC m=+80.244860490"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.059454 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podStartSLOduration=11.059426581 podStartE2EDuration="11.059426581s" podCreationTimestamp="2026-01-27 12:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:33.055681064 +0000 UTC m=+80.292709274" watchObservedRunningTime="2026-01-27 12:27:33.059426581 +0000 UTC m=+80.296454791"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.085737 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.085757 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.585710626 +0000 UTC m=+80.822738856 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.085999 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.086452 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.586439037 +0000 UTC m=+80.823467327 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.170641 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-k6tpv" podStartSLOduration=42.170609954 podStartE2EDuration="42.170609954s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:33.12137954 +0000 UTC m=+80.358407760" watchObservedRunningTime="2026-01-27 12:27:33.170609954 +0000 UTC m=+80.407638174"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.188236 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.189007 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.688982701 +0000 UTC m=+80.926010911 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.233093 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=1.233039686 podStartE2EDuration="1.233039686s" podCreationTimestamp="2026-01-27 12:27:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:33.218702185 +0000 UTC m=+80.455730425" watchObservedRunningTime="2026-01-27 12:27:33.233039686 +0000 UTC m=+80.470067916"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.290480 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.290956 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.790938789 +0000 UTC m=+81.027967009 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.391656 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.391970 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.891946938 +0000 UTC m=+81.128975148 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.490830 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.492650 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.492982 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:33.992969438 +0000 UTC m=+81.229997648 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.493821 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.530156 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.530297 4900 patch_prober.go:28] interesting pod/apiserver-76f77b778f-vrhjw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.530379 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" podUID="511728ec-f571-4333-b0e2-c6a897c6c2d5" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.531226 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.593541 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.596440 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.096414188 +0000 UTC m=+81.333442398 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.598361 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.696074 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.698172 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.198149079 +0000 UTC m=+81.435177489 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.798649 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.799395 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.299367626 +0000 UTC m=+81.536395836 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.900735 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:33 crc kubenswrapper[4900]: E0127 12:27:33.901169 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.401153928 +0000 UTC m=+81.638182148 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.903110 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 27 12:27:33 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld
Jan 27 12:27:33 crc kubenswrapper[4900]: [+]process-running ok
Jan 27 12:27:33 crc kubenswrapper[4900]: healthz check failed
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.903190 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.990124 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-5zbpj"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.990230 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-5zbpj"
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.992104 4900 patch_prober.go:28] interesting pod/console-f9d7485db-5zbpj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body=
Jan 27 12:27:33 crc kubenswrapper[4900]: I0127 12:27:33.992207 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5zbpj" podUID="6893cb7a-209f-4822-9e82-34ad39c7647f" containerName="console" probeResult="failure" output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.002234 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.002912 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.502884629 +0000 UTC m=+81.739912839 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.103952 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.104362 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.604347732 +0000 UTC m=+81.841375942 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.106851 4900 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-v5vsd container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body=
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.106902 4900 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-v5vsd container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body=
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.106907 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" podUID="ef778856-8cbe-4b13-90f6-74bd28af2c86" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.106933 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" podUID="ef778856-8cbe-4b13-90f6-74bd28af2c86" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.117701 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.117751 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.117709 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.117807 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.205016 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.205528 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.705469635 +0000 UTC m=+81.942497835 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.294525 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.306444 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.306874 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.806858406 +0000 UTC m=+82.043886616 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.407687 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.408017 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.907960949 +0000 UTC m=+82.144989159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.408135 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.408689 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:34.90867314 +0000 UTC m=+82.145701350 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.509741 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.510539 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.010508393 +0000 UTC m=+82.247536603 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.591544 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-csvht"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.611649 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.612139 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.112117631 +0000 UTC m=+82.349145841 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.713904 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.714774 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.214756218 +0000 UTC m=+82.451784428 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.735232 4900 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8kcp8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" start-of-body=
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.735318 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.741293 4900 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8kcp8 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused" start-of-body=
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.741381 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.34:8080/healthz\": dial tcp 10.217.0.34:8080: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.818249 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.818712 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.318697082 +0000 UTC m=+82.555725302 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.876691 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-zvswh"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.877291 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body=
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.877312 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body=
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.877363 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.877398 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.884050 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 27 12:27:34 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld
Jan 27 12:27:34 crc kubenswrapper[4900]: [+]process-running ok
Jan 27 12:27:34 crc kubenswrapper[4900]: healthz check failed
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.884145 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.919353 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.919766 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.419722322 +0000 UTC m=+82.656750532 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:34 crc kubenswrapper[4900]: I0127 12:27:34.920075 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:34 crc kubenswrapper[4900]: E0127 12:27:34.920491 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.420474174 +0000 UTC m=+82.657502444 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.091302 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.092749 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.59272861 +0000 UTC m=+82.829756820 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.192124 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.192533 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.692516385 +0000 UTC m=+82.929544595 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.245850 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.293262 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.293459 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.793432912 +0000 UTC m=+83.030461132 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.293607 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.293888 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.793879805 +0000 UTC m=+83.030908015 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.395221 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.395477 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.895437261 +0000 UTC m=+83.132465471 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.395721 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.396265 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.896256155 +0000 UTC m=+83.133284375 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.409646 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.410583 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.422967 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.423130 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.445136 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.497294 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.497650 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/570146c6-131d-43b7-90f3-93ad768ca7ca-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"570146c6-131d-43b7-90f3-93ad768ca7ca\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.497899 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/570146c6-131d-43b7-90f3-93ad768ca7ca-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"570146c6-131d-43b7-90f3-93ad768ca7ca\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.498141 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:35.998108319 +0000 UTC m=+83.235136529 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.584208 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" event={"ID":"ddf9e91c-0239-4d06-af1e-9ef7d22e048a","Type":"ContainerStarted","Data":"afc743a67d296d2cf3a9f6e5853b1eacced0b56e542fdf8b33da1af2e7a6b9bc"}
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.599184 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/570146c6-131d-43b7-90f3-93ad768ca7ca-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"570146c6-131d-43b7-90f3-93ad768ca7ca\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.599251 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.599336 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/570146c6-131d-43b7-90f3-93ad768ca7ca-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"570146c6-131d-43b7-90f3-93ad768ca7ca\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.599539 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/570146c6-131d-43b7-90f3-93ad768ca7ca-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"570146c6-131d-43b7-90f3-93ad768ca7ca\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.600533 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.100514299 +0000 UTC m=+83.337542609 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.625279 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/570146c6-131d-43b7-90f3-93ad768ca7ca-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"570146c6-131d-43b7-90f3-93ad768ca7ca\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.701755 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.701886 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.201859649 +0000 UTC m=+83.438887859 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.702321 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.702633 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.202625021 +0000 UTC m=+83.439653231 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.736676 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.802903 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.803166 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.303120006 +0000 UTC m=+83.540148216 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.803354 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.804036 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.304022842 +0000 UTC m=+83.541051052 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.843566 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:35 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:35 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:35 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.843643 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:35 crc kubenswrapper[4900]: I0127 12:27:35.904762 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:35 crc kubenswrapper[4900]: E0127 12:27:35.905379 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.405356891 +0000 UTC m=+83.642385111 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.006992 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.007527 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.507503954 +0000 UTC m=+83.744532164 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.067315 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.088912 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.108969 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.109866 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.609840972 +0000 UTC m=+83.846869182 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.112318 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.114675 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.614656881 +0000 UTC m=+83.851685091 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.152412 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.214860 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.215008 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.714984921 +0000 UTC m=+83.952013131 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.215045 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.215500 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.715490066 +0000 UTC m=+83.952518276 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.318860 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.319276 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.819256495 +0000 UTC m=+84.056284705 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.387480 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.400686 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-grrcf"] Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.401714 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.407591 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.420606 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.421810 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:36.921692386 +0000 UTC m=+84.158720586 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.460884 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-grrcf"] Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.523985 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.545481 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.045436039 +0000 UTC m=+84.282464249 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.588244 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jvcrt"] Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.591263 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.608513 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.611962 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jvcrt"] Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.626271 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-utilities\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.626363 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn6nj\" (UniqueName: \"kubernetes.io/projected/754ddb47-7891-4142-b52c-98a29ca40078-kube-api-access-wn6nj\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.626387 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-catalog-content\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.626477 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.626911 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.126888888 +0000 UTC m=+84.363917098 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.731467 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.732147 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn6nj\" (UniqueName: \"kubernetes.io/projected/754ddb47-7891-4142-b52c-98a29ca40078-kube-api-access-wn6nj\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.732184 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-catalog-content\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.732305 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.232256033 +0000 UTC m=+84.469284403 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.732440 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-utilities\") pod \"community-operators-jvcrt\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.732605 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-catalog-content\") pod \"community-operators-jvcrt\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.732732 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.732766 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-catalog-content\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.732787 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkh6z\" (UniqueName: \"kubernetes.io/projected/86867c21-fd52-44e7-85d2-87da48322397-kube-api-access-rkh6z\") pod \"community-operators-jvcrt\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.734298 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.234284781 +0000 UTC m=+84.471312991 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.735112 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-utilities\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.744609 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-utilities\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.763101 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n9dph"] Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.765199 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.779955 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn6nj\" (UniqueName: \"kubernetes.io/projected/754ddb47-7891-4142-b52c-98a29ca40078-kube-api-access-wn6nj\") pod \"certified-operators-grrcf\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.781423 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n9dph"] Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.795504 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 27 12:27:36 crc kubenswrapper[4900]: W0127 12:27:36.822256 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod570146c6_131d_43b7_90f3_93ad768ca7ca.slice/crio-ee79859ee70ed4d2e27a19a52eebca8554d82e55aa597d3428d52648190e0644 WatchSource:0}: Error finding container ee79859ee70ed4d2e27a19a52eebca8554d82e55aa597d3428d52648190e0644: Status 404 returned error can't find the container with id ee79859ee70ed4d2e27a19a52eebca8554d82e55aa597d3428d52648190e0644 Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.838038 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.838489 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-utilities\") pod \"community-operators-jvcrt\" (UID: 
\"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.838546 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-catalog-content\") pod \"community-operators-jvcrt\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.838617 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkh6z\" (UniqueName: \"kubernetes.io/projected/86867c21-fd52-44e7-85d2-87da48322397-kube-api-access-rkh6z\") pod \"community-operators-jvcrt\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.839285 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.339259985 +0000 UTC m=+84.576288195 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.839781 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-utilities\") pod \"community-operators-jvcrt\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.839983 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-catalog-content\") pod \"community-operators-jvcrt\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.853855 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:36 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:36 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:36 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.855932 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.882223 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkh6z\" (UniqueName: 
\"kubernetes.io/projected/86867c21-fd52-44e7-85d2-87da48322397-kube-api-access-rkh6z\") pod \"community-operators-jvcrt\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.944636 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fptvd\" (UniqueName: \"kubernetes.io/projected/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-kube-api-access-fptvd\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.944738 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.944847 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-catalog-content\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.944912 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-utilities\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:36 crc kubenswrapper[4900]: E0127 12:27:36.945598 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.445576927 +0000 UTC m=+84.682605137 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:36 crc kubenswrapper[4900]: I0127 12:27:36.954613 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q5sf2"] Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:36.974370 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.003545 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.045822 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.046173 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-utilities\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.046245 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fptvd\" (UniqueName: \"kubernetes.io/projected/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-kube-api-access-fptvd\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.046322 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-catalog-content\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.047509 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-utilities\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.047745 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.547713019 +0000 UTC m=+84.784741269 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.047900 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-catalog-content\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.048516 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.071206 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q5sf2"] Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.104312 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fptvd\" (UniqueName: \"kubernetes.io/projected/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-kube-api-access-fptvd\") pod \"certified-operators-n9dph\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.134323 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-clhb8"] Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.134521 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.150221 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.150549 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8582r\" (UniqueName: \"kubernetes.io/projected/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-kube-api-access-8582r\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.150736 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-utilities\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.150877 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-catalog-content\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.153789 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.653759864 +0000 UTC m=+84.890788084 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.262378 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.262621 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8582r\" (UniqueName: \"kubernetes.io/projected/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-kube-api-access-8582r\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.262714 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-utilities\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.262781 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-catalog-content\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.263435 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-catalog-content\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.263551 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.763532626 +0000 UTC m=+85.000560826 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.264234 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-utilities\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.379333 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.380142 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.880115673 +0000 UTC m=+85.117143883 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.450671 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8582r\" (UniqueName: \"kubernetes.io/projected/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-kube-api-access-8582r\") pod \"community-operators-q5sf2\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.480723 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.481190 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:37.981165614 +0000 UTC m=+85.218193824 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.582263    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.582725    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.08271052 +0000 UTC m=+85.319738730 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.649207    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5sf2"
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.683814    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.684469    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.184448341 +0000 UTC m=+85.421476551 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.695437    4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins" containerID="cri-o://29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" gracePeriod=30
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.696166    4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"570146c6-131d-43b7-90f3-93ad768ca7ca","Type":"ContainerStarted","Data":"ee79859ee70ed4d2e27a19a52eebca8554d82e55aa597d3428d52648190e0644"}
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.787986    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.788228    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm"
Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.789530    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.289506487 +0000 UTC m=+85.526534697 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.831753    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b6e155fd-bee9-4c32-9919-0dbee597003e-metrics-certs\") pod \"network-metrics-daemon-7gwzm\" (UID: \"b6e155fd-bee9-4c32-9919-0dbee597003e\") " pod="openshift-multus/network-metrics-daemon-7gwzm"
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.940705    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:37 crc kubenswrapper[4900]: E0127 12:27:37.941194    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.441176022 +0000 UTC m=+85.678204232 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.950478    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7gwzm"
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.986527    4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 27 12:27:37 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld
Jan 27 12:27:37 crc kubenswrapper[4900]: [+]process-running ok
Jan 27 12:27:37 crc kubenswrapper[4900]: healthz check failed
Jan 27 12:27:37 crc kubenswrapper[4900]: I0127 12:27:37.986597    4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.042537    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.042933    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.542916623 +0000 UTC m=+85.779944843 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.106381    4900 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-v5vsd container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.106654    4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" podUID="ef778856-8cbe-4b13-90f6-74bd28af2c86" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.147929    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.148346    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.648329389 +0000 UTC m=+85.885357599 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.158717    4900 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-v5vsd container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.158788    4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd" podUID="ef778856-8cbe-4b13-90f6-74bd28af2c86" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.252891    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.253511    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.753493159 +0000 UTC m=+85.990521369 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.354128    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.354523    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.854474118 +0000 UTC m=+86.091502318 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.407602    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jvcrt"]
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.456263    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.456828    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:38.956810786 +0000 UTC m=+86.193838996 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.558263    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.559204    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.059181565 +0000 UTC m=+86.296209775 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.606474    4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-r6fg7"]
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.608427    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.616578    4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.639597    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r6fg7"]
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.666275    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.667437    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.167406153 +0000 UTC m=+86.404434363 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.769459    4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jvcrt" event={"ID":"86867c21-fd52-44e7-85d2-87da48322397","Type":"ContainerStarted","Data":"bd512cb49e94dd7f0a85ffa4d0d1788262c9a05746796b60a472be8302717d9e"}
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.770712    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.771182    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdhgd\" (UniqueName: \"kubernetes.io/projected/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-kube-api-access-rdhgd\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.771275    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-catalog-content\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.771421    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-utilities\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.777428    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.27737967 +0000 UTC m=+86.514407880 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.801659    4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"570146c6-131d-43b7-90f3-93ad768ca7ca","Type":"ContainerStarted","Data":"2ec9494083f01cda29c0cf1998a1583a393fd2401dbfd5dc367d03d6ac43bc64"}
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.826785    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n9dph"]
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.826975    4900 patch_prober.go:28] interesting pod/apiserver-76f77b778f-vrhjw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]log ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]etcd ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]poststarthook/max-in-flight-filter ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Jan 27 12:27:38 crc kubenswrapper[4900]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]poststarthook/project.openshift.io-projectcache ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [-]poststarthook/project.openshift.io-projectauthorizationcache failed: reason withheld
Jan 27 12:27:38 crc kubenswrapper[4900]: [-]poststarthook/openshift.io-startinformers failed: reason withheld
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]poststarthook/openshift.io-restmapperupdater ok
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 27 12:27:38 crc kubenswrapper[4900]: livez check failed
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.827090    4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" podUID="511728ec-f571-4333-b0e2-c6a897c6c2d5" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.835778    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-grrcf"]
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.851550    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q5sf2"]
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.859522    4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 27 12:27:38 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld
Jan 27 12:27:38 crc kubenswrapper[4900]: [+]process-running ok
Jan 27 12:27:38 crc kubenswrapper[4900]: healthz check failed
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.859580    4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.878021    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-utilities\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.878092    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdhgd\" (UniqueName: \"kubernetes.io/projected/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-kube-api-access-rdhgd\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.878133    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-catalog-content\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.878175    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.878527    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.378510944 +0000 UTC m=+86.615539154 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.878877    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-utilities\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.879396    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-catalog-content\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.944098    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdhgd\" (UniqueName: \"kubernetes.io/projected/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-kube-api-access-rdhgd\") pod \"redhat-marketplace-r6fg7\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.958877    4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5jbr6"]
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.960002    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.984624    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.985240    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.485198697 +0000 UTC m=+86.722226917 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.985442    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:38 crc kubenswrapper[4900]: E0127 12:27:38.985953    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.485942528 +0000 UTC m=+86.722970738 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:38 crc kubenswrapper[4900]: I0127 12:27:38.986985    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5jbr6"]
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.000587    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r6fg7"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.020515    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-7gwzm"]
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.086906    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.087247    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.587201456 +0000 UTC m=+86.824229656 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.087430    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzvjg\" (UniqueName: \"kubernetes.io/projected/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-kube-api-access-qzvjg\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.087618    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-utilities\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.087674    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-catalog-content\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.087725    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.088335    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.588284557 +0000 UTC m=+86.825312947 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.108492    4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.109380    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.112462    4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.112704    4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.160142    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 27 12:27:39 crc kubenswrapper[4900]: W0127 12:27:39.183734    4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6e155fd_bee9_4c32_9919_0dbee597003e.slice/crio-a2f2075ade575aff9a8c1c822b16820f5b524dad44f15805d60c3adf1a496ed1 WatchSource:0}: Error finding container a2f2075ade575aff9a8c1c822b16820f5b524dad44f15805d60c3adf1a496ed1: Status 404 returned error can't find the container with id a2f2075ade575aff9a8c1c822b16820f5b524dad44f15805d60c3adf1a496ed1
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.204041    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.204738    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.204788    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.205149    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-utilities\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.205184    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-catalog-content\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.205257    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzvjg\" (UniqueName: \"kubernetes.io/projected/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-kube-api-access-qzvjg\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.205431    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.705386929 +0000 UTC m=+86.942415139 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.206740    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-utilities\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.207805    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-catalog-content\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.242274    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzvjg\" (UniqueName: \"kubernetes.io/projected/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-kube-api-access-qzvjg\") pod \"redhat-marketplace-5jbr6\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.303446    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5jbr6"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.307126    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.307176    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.307303    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.307650    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.807636294 +0000 UTC m=+87.044664504 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.307849    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.414295    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.414952    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:39.914922185 +0000 UTC m=+87.151950395 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.418695    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.461567    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.516567    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.517225    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.017197221 +0000 UTC m=+87.254225441 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.550216    4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9s4cn"]
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.551805    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.577393    4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.583009    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9s4cn"]
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.618201    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.618408    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-catalog-content\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.618557    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.11850728 +0000 UTC m=+87.355535490 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.618861    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-utilities\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.620011    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spv6t\" (UniqueName: \"kubernetes.io/projected/93a3d7a3-dddf-49e3-be5b-5369108a5e13-kube-api-access-spv6t\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.620212    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.620868    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.120844917 +0000 UTC m=+87.357873137 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.723500    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.724318    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-catalog-content\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.724395    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-utilities\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.724423    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spv6t\" (UniqueName: \"kubernetes.io/projected/93a3d7a3-dddf-49e3-be5b-5369108a5e13-kube-api-access-spv6t\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.725041    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-catalog-content\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.725244    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.225215164 +0000 UTC m=+87.462243524 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.725337    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-utilities\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.747008    4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spv6t\" (UniqueName: \"kubernetes.io/projected/93a3d7a3-dddf-49e3-be5b-5369108a5e13-kube-api-access-spv6t\") pod \"redhat-operators-9s4cn\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.826400    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.826976    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.326957235 +0000 UTC m=+87.563985445 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.854218    4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 27 12:27:39 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld
Jan 27 12:27:39 crc kubenswrapper[4900]: [+]process-running ok
Jan 27 12:27:39 crc kubenswrapper[4900]: healthz check failed
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.854351    4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.882691    4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jvcrt" event={"ID":"86867c21-fd52-44e7-85d2-87da48322397","Type":"ContainerStarted","Data":"c4b18bcd28b16e9f8937d1f037c6af60490d1ba25dcc5eda168248457a2e42be"}
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.899702    4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrcf" event={"ID":"754ddb47-7891-4142-b52c-98a29ca40078","Type":"ContainerStarted","Data":"e7c571e5811a9fc1786cb8745b22a4661ccd0841055dbf721f9420401aa46a43"}
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.915083    4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n9dph" event={"ID":"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57","Type":"ContainerStarted","Data":"f4a3aab88d8f6b62f491cbd1fe8958ed5fb4f49ed29b3f34bb23f96a3580cfc1"}
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.934669    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9s4cn"
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.942067    4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5sf2" event={"ID":"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4","Type":"ContainerStarted","Data":"bb29ae9f384ff8c7938771cbe7bf622b000ffb7eddf3e55a066cdd2b98af2d87"}
Jan 27 12:27:39 crc kubenswrapper[4900]: I0127 12:27:39.973234    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:39 crc kubenswrapper[4900]: E0127 12:27:39.974918    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.474873082 +0000 UTC m=+87.711901302 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.003512    4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-7gwzm" event={"ID":"b6e155fd-bee9-4c32-9919-0dbee597003e","Type":"ContainerStarted","Data":"a2f2075ade575aff9a8c1c822b16820f5b524dad44f15805d60c3adf1a496ed1"}
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.004095    4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p7sss"]
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.006201    4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7sss"
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.009694    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p7sss"]
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.042165    4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=5.042119263 podStartE2EDuration="5.042119263s" podCreationTimestamp="2026-01-27 12:27:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:40.035471292 +0000 UTC m=+87.272499502" watchObservedRunningTime="2026-01-27 12:27:40.042119263 +0000 UTC m=+87.279147473"
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.075399    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.075942    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.575922373 +0000 UTC m=+87.812950573 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.113319    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5jbr6"]
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.134648    4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5vsd"
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.141094    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r6fg7"]
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.176984    4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.177661    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-catalog-content\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss"
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.177777    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cdzt\" (UniqueName: \"kubernetes.io/projected/160d7119-622e-4998-a7f9-7f6a362cb5a2-kube-api-access-7cdzt\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss"
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.177954    4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-utilities\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss"
Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.179186    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.679147647 +0000 UTC m=+87.916176017 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.247091    4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.281535    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.281831    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-catalog-content\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss"
Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.281960    4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cdzt\" (UniqueName: \"kubernetes.io/projected/160d7119-622e-4998-a7f9-7f6a362cb5a2-kube-api-access-7cdzt\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss"
Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.282156    4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.782125224 +0000 UTC m=+88.019153624 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.282415 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-utilities\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.283209 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-catalog-content\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.283465 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-utilities\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.309174 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cdzt\" (UniqueName: \"kubernetes.io/projected/160d7119-622e-4998-a7f9-7f6a362cb5a2-kube-api-access-7cdzt\") pod \"redhat-operators-p7sss\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.383232 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.383520 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.883477584 +0000 UTC m=+88.120505794 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.383727 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.384686 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.884664118 +0000 UTC m=+88.121692328 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.398244 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.400363 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9s4cn"] Jan 27 12:27:40 crc kubenswrapper[4900]: W0127 12:27:40.425033 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93a3d7a3_dddf_49e3_be5b_5369108a5e13.slice/crio-b70ad1b84c6875c5d92740e8b6301239bbdc39a10c5e1a62a0348c8428a9d8ac WatchSource:0}: Error finding container b70ad1b84c6875c5d92740e8b6301239bbdc39a10c5e1a62a0348c8428a9d8ac: Status 404 returned error can't find the container with id b70ad1b84c6875c5d92740e8b6301239bbdc39a10c5e1a62a0348c8428a9d8ac Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.485211 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.485538 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.985493312 +0000 UTC m=+88.222521522 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.485723 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.486459 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:40.986440469 +0000 UTC m=+88.223468679 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.591944 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.592848 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.092825693 +0000 UTC m=+88.329853903 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.686094 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p7sss"] Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.695884 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.696451 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.196413567 +0000 UTC m=+88.433441777 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: W0127 12:27:40.698643 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod160d7119_622e_4998_a7f9_7f6a362cb5a2.slice/crio-c1f0d563e35e049fb8e4970ee52247b88361de822499469d1688ff96bbc55c95 WatchSource:0}: Error finding container c1f0d563e35e049fb8e4970ee52247b88361de822499469d1688ff96bbc55c95: Status 404 returned error can't find the container with id c1f0d563e35e049fb8e4970ee52247b88361de822499469d1688ff96bbc55c95 Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.797209 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.797515 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.297460499 +0000 UTC m=+88.534488719 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.797700 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.798031 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.298008894 +0000 UTC m=+88.535037104 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.938227 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:40 crc kubenswrapper[4900]: E0127 12:27:40.938741 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.438719624 +0000 UTC m=+88.675747834 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.942492 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:40 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:40 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:40 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.942562 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:40 crc kubenswrapper[4900]: I0127 12:27:40.943515 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-599cn" Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.041442 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.043161 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.543139692 +0000 UTC m=+88.780167972 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.063561 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b","Type":"ContainerStarted","Data":"590fa4f0926bd0243f0e91762bd13adc6d206d16f7464d15f60c93c67c467a3a"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.065605 4900 generic.go:334] "Generic (PLEG): container finished" podID="86867c21-fd52-44e7-85d2-87da48322397" containerID="c4b18bcd28b16e9f8937d1f037c6af60490d1ba25dcc5eda168248457a2e42be" exitCode=0 Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.065690 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jvcrt" event={"ID":"86867c21-fd52-44e7-85d2-87da48322397","Type":"ContainerDied","Data":"c4b18bcd28b16e9f8937d1f037c6af60490d1ba25dcc5eda168248457a2e42be"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.068686 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.070075 4900 generic.go:334] "Generic (PLEG): container finished" podID="754ddb47-7891-4142-b52c-98a29ca40078" containerID="01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5" exitCode=0 Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.070193 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrcf" event={"ID":"754ddb47-7891-4142-b52c-98a29ca40078","Type":"ContainerDied","Data":"01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.108962 4900 generic.go:334] "Generic (PLEG): container finished" podID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerID="c77d204b0d85ef94817a7f2cea935991f21c6df4b85f27cdcfb77778a0556619" exitCode=0 Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.109199 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n9dph" event={"ID":"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57","Type":"ContainerDied","Data":"c77d204b0d85ef94817a7f2cea935991f21c6df4b85f27cdcfb77778a0556619"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.129189 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-7gwzm" event={"ID":"b6e155fd-bee9-4c32-9919-0dbee597003e","Type":"ContainerStarted","Data":"7fb3ff5fb9a28eb70b4941d0c09fcd365de8e940fa30aacf35715337512b3722"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.131875 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7sss" event={"ID":"160d7119-622e-4998-a7f9-7f6a362cb5a2","Type":"ContainerStarted","Data":"c1f0d563e35e049fb8e4970ee52247b88361de822499469d1688ff96bbc55c95"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.134388 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s4cn" 
event={"ID":"93a3d7a3-dddf-49e3-be5b-5369108a5e13","Type":"ContainerStarted","Data":"b70ad1b84c6875c5d92740e8b6301239bbdc39a10c5e1a62a0348c8428a9d8ac"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.135959 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5jbr6" event={"ID":"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d","Type":"ContainerStarted","Data":"8e008a60254b604a2fc26d221acf28e522131876394847c966625038983010b0"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.137746 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r6fg7" event={"ID":"354400fa-dc6c-4435-b9cf-09b5e76a6ef2","Type":"ContainerStarted","Data":"d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.137789 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r6fg7" event={"ID":"354400fa-dc6c-4435-b9cf-09b5e76a6ef2","Type":"ContainerStarted","Data":"ba9ddd28898cfb8b02b0b3e55804b629fa31eb7b967eba78d143cad64eeb2d52"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.141303 4900 generic.go:334] "Generic (PLEG): container finished" podID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerID="92a1e7095fb608e0ee1b7302815d6ce5a9164e37e26940f243053074313999fa" exitCode=0 Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.141422 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5sf2" event={"ID":"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4","Type":"ContainerDied","Data":"92a1e7095fb608e0ee1b7302815d6ce5a9164e37e26940f243053074313999fa"} Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.142717 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.142885 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.642855315 +0000 UTC m=+88.879883525 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.143595 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.144330 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.644289517 +0000 UTC m=+88.881317727 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.245094 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.246826 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.7467969 +0000 UTC m=+88.983825110 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.347357 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.347817 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:41.84780089 +0000 UTC m=+89.084829110 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.605027 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.606137 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.106090856 +0000 UTC m=+89.343119066 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.606419 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.608911 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.108877275 +0000 UTC m=+89.345905646 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.709924 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.710291 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.210230915 +0000 UTC m=+89.447259135 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.710562 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.710971 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.210954306 +0000 UTC m=+89.447982516 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.812382 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.812616 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.312584614 +0000 UTC m=+89.549612824 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.812823 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.813143 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.31313486 +0000 UTC m=+89.550163070 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.844463 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:41 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:41 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:41 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.844570 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.924912 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.926010 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.425870847 +0000 UTC m=+89.662899057 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:41 crc kubenswrapper[4900]: I0127 12:27:41.926228 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:41 crc kubenswrapper[4900]: E0127 12:27:41.926571 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.426558997 +0000 UTC m=+89.663587207 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.028102 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.028525 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.528501124 +0000 UTC m=+89.765529334 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.129268 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.129870 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.629840223 +0000 UTC m=+89.866868433 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.159468 4900 generic.go:334] "Generic (PLEG): container finished" podID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerID="af4dfb7e99827cc962992297dfd9b862278233f31bcabed00a1f6164989f589f" exitCode=0 Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.159727 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s4cn" event={"ID":"93a3d7a3-dddf-49e3-be5b-5369108a5e13","Type":"ContainerDied","Data":"af4dfb7e99827cc962992297dfd9b862278233f31bcabed00a1f6164989f589f"} Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.166198 4900 generic.go:334] "Generic (PLEG): container finished" podID="570146c6-131d-43b7-90f3-93ad768ca7ca" containerID="2ec9494083f01cda29c0cf1998a1583a393fd2401dbfd5dc367d03d6ac43bc64" exitCode=0 Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.166316 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"570146c6-131d-43b7-90f3-93ad768ca7ca","Type":"ContainerDied","Data":"2ec9494083f01cda29c0cf1998a1583a393fd2401dbfd5dc367d03d6ac43bc64"} Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.170573 4900 generic.go:334] "Generic (PLEG): container finished" podID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerID="21def6407c52b77ddd371a02e16da8659df5db996ee0b1c935dc1cad52d2ed8b" exitCode=0 Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.171115 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5jbr6" event={"ID":"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d","Type":"ContainerDied","Data":"21def6407c52b77ddd371a02e16da8659df5db996ee0b1c935dc1cad52d2ed8b"} Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.173693 4900 generic.go:334] "Generic (PLEG): 
container finished" podID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerID="d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678" exitCode=0 Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.173755 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r6fg7" event={"ID":"354400fa-dc6c-4435-b9cf-09b5e76a6ef2","Type":"ContainerDied","Data":"d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678"} Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.177015 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b","Type":"ContainerStarted","Data":"3d3d896152f2a3a4265c6bbc38db3f8212f323f3aa523e02343fc039bfab8910"} Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.231423 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.231779 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.731743739 +0000 UTC m=+89.968771949 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.232501 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.233265 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.733231532 +0000 UTC m=+89.970259742 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.262554 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.262526033 podStartE2EDuration="3.262526033s" podCreationTimestamp="2026-01-27 12:27:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:42.261344089 +0000 UTC m=+89.498372299" watchObservedRunningTime="2026-01-27 12:27:42.262526033 +0000 UTC m=+89.499554243" Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.334596 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.335953 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.835867399 +0000 UTC m=+90.072895619 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.336494 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.337870 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.837833865 +0000 UTC m=+90.074862085 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.439916 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.440342 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.940264546 +0000 UTC m=+90.177292756 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.440661 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.441234 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:42.941208013 +0000 UTC m=+90.178236223 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.542426 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.542723 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.042675726 +0000 UTC m=+90.279703936 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.542814 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.543351 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.043343586 +0000 UTC m=+90.280371796 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.645630 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.645763 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.145743236 +0000 UTC m=+90.382771446 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.646082 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.646547 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.146537378 +0000 UTC m=+90.383565588 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.747583 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.747993 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.24795903 +0000 UTC m=+90.484987240 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.748249 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.748818 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.248797124 +0000 UTC m=+90.485825334 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.842468 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:42 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:42 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:42 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.842544 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:42 crc kubenswrapper[4900]: I0127 12:27:42.849153 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:42 crc kubenswrapper[4900]: E0127 12:27:42.849634 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.349611749 +0000 UTC m=+90.586639959 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:42.950700 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:42.951227 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.451205926 +0000 UTC m=+90.688234136 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.155839 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.156586 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.656552242 +0000 UTC m=+90.893580452 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.161258 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.162237 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.662212214 +0000 UTC m=+90.899240424 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.325468 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.326245 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.826213653 +0000 UTC m=+91.063241863 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.357512 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" event={"ID":"ddf9e91c-0239-4d06-af1e-9ef7d22e048a","Type":"ContainerStarted","Data":"742667b41aa8a2dc1351654988ff7ffb61a6c028d06b82a58b1dc739dd54387b"}
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.360810 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-7gwzm" event={"ID":"b6e155fd-bee9-4c32-9919-0dbee597003e","Type":"ContainerStarted","Data":"2d860379ee14ecf67bab9f6f37a0e41ef749a4069adcb0efadc3be2870dd1e7a"}
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.364162 4900 generic.go:334] "Generic (PLEG): container finished" podID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerID="7e78daa90011f87779c591f3167107cc00ef05528e67ef1fd7f2c517b30b6f33" exitCode=0
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.365559 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7sss" event={"ID":"160d7119-622e-4998-a7f9-7f6a362cb5a2","Type":"ContainerDied","Data":"7e78daa90011f87779c591f3167107cc00ef05528e67ef1fd7f2c517b30b6f33"}
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.373172 4900 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.425401 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-7gwzm" podStartSLOduration=52.425361399 podStartE2EDuration="52.425361399s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:43.424877925 +0000 UTC m=+90.661906135" watchObservedRunningTime="2026-01-27 12:27:43.425361399 +0000 UTC m=+90.662389609"
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.428801 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.429627 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:43.929602851 +0000 UTC m=+91.166631071 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.564706 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.565317 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.065273047 +0000 UTC m=+91.302301287 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.594867 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.600814 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-vrhjw" Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.754319 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.764812 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.264793755 +0000 UTC m=+91.501821965 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.845903 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:43 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:43 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:43 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.846012 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.866095 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.866506 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.366404243 +0000 UTC m=+91.603432453 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.866640 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.867144 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.367124683 +0000 UTC m=+91.604152893 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.967834 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.968079 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.46802639 +0000 UTC m=+91.705054600 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:43 crc kubenswrapper[4900]: I0127 12:27:43.974420 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:43 crc kubenswrapper[4900]: E0127 12:27:43.974970 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.474945619 +0000 UTC m=+91.711973829 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.002204 4900 patch_prober.go:28] interesting pod/console-f9d7485db-5zbpj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.002296 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5zbpj" podUID="6893cb7a-209f-4822-9e82-34ad39c7647f" containerName="console" probeResult="failure" output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.076334 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:44 crc kubenswrapper[4900]: E0127 12:27:44.094255 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.594212342 +0000 UTC m=+91.831240552 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.118305 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.118398 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.118463 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.118542 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.196102 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:44 crc kubenswrapper[4900]: E0127 12:27:44.197105 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.697085536 +0000 UTC m=+91.934113746 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.247868 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.296959 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/570146c6-131d-43b7-90f3-93ad768ca7ca-kube-api-access\") pod \"570146c6-131d-43b7-90f3-93ad768ca7ca\" (UID: \"570146c6-131d-43b7-90f3-93ad768ca7ca\") " Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.297623 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/570146c6-131d-43b7-90f3-93ad768ca7ca-kubelet-dir\") pod \"570146c6-131d-43b7-90f3-93ad768ca7ca\" (UID: \"570146c6-131d-43b7-90f3-93ad768ca7ca\") " Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.297774 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/570146c6-131d-43b7-90f3-93ad768ca7ca-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "570146c6-131d-43b7-90f3-93ad768ca7ca" (UID: "570146c6-131d-43b7-90f3-93ad768ca7ca"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.297826 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 27 12:27:44 crc kubenswrapper[4900]: E0127 12:27:44.297997 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.797971452 +0000 UTC m=+92.034999662 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.298229 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.298329 4900 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/570146c6-131d-43b7-90f3-93ad768ca7ca-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:27:44 crc kubenswrapper[4900]: E0127 12:27:44.298677 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-27 12:27:44.798662142 +0000 UTC m=+92.035690352 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-cr4gz" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.325634 4900 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-27T12:27:43.373214872Z","Handler":null,"Name":""}
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.340293 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/570146c6-131d-43b7-90f3-93ad768ca7ca-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "570146c6-131d-43b7-90f3-93ad768ca7ca" (UID: "570146c6-131d-43b7-90f3-93ad768ca7ca"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.349948 4900 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.350035 4900 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.397478 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" event={"ID":"ddf9e91c-0239-4d06-af1e-9ef7d22e048a","Type":"ContainerStarted","Data":"1112278f1ec664662e647bd76c4ae1cf210297ea4cd969e98c2ccb0ea72fa337"}
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.399675 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.400188 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/570146c6-131d-43b7-90f3-93ad768ca7ca-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.400982 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"570146c6-131d-43b7-90f3-93ad768ca7ca","Type":"ContainerDied","Data":"ee79859ee70ed4d2e27a19a52eebca8554d82e55aa597d3428d52648190e0644"}
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.401122 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee79859ee70ed4d2e27a19a52eebca8554d82e55aa597d3428d52648190e0644"
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.401025 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.403864 4900 generic.go:334] "Generic (PLEG): container finished" podID="ea673ed9-5530-4b5d-8997-403f903d27a6" containerID="640d5ad067ae99ae44c0d01c68c5fe1ce407c89a12675f519764cbcb91097de9" exitCode=0
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.404004 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" event={"ID":"ea673ed9-5530-4b5d-8997-403f903d27a6","Type":"ContainerDied","Data":"640d5ad067ae99ae44c0d01c68c5fe1ce407c89a12675f519764cbcb91097de9"}
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.405311 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.502009 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.510832 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.519394 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.519454 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.697755 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-cr4gz\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.739072 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8"
Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.819477 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.834433 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-x96hr" Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.845167 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:44 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:44 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:44 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:44 crc kubenswrapper[4900]: I0127 12:27:44.845257 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:45 crc kubenswrapper[4900]: I0127 12:27:45.349522 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cr4gz"] Jan 27 12:27:45 crc kubenswrapper[4900]: I0127 12:27:45.435212 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" event={"ID":"96450fdc-fc86-4fc2-a04d-cfe29f04aa18","Type":"ContainerStarted","Data":"98ed5745a98fb92854854e59b2c63df8354947b8e5e23d6dd7ad9d7e747d8ae5"} Jan 27 12:27:45 crc kubenswrapper[4900]: I0127 12:27:45.462869 4900 generic.go:334] "Generic (PLEG): container finished" podID="1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b" containerID="3d3d896152f2a3a4265c6bbc38db3f8212f323f3aa523e02343fc039bfab8910" exitCode=0 Jan 27 12:27:45 crc kubenswrapper[4900]: I0127 12:27:45.463100 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b","Type":"ContainerDied","Data":"3d3d896152f2a3a4265c6bbc38db3f8212f323f3aa523e02343fc039bfab8910"} Jan 27 12:27:45 crc kubenswrapper[4900]: I0127 12:27:45.476693 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" event={"ID":"ddf9e91c-0239-4d06-af1e-9ef7d22e048a","Type":"ContainerStarted","Data":"0f870ef3b9f9e44498d9a06fe833fc290de7d1c0f3ba85f5b6683957e8e273ae"} Jan 27 12:27:45 crc kubenswrapper[4900]: I0127 12:27:45.539410 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" podStartSLOduration=23.539383015 podStartE2EDuration="23.539383015s" podCreationTimestamp="2026-01-27 12:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:45.537300575 +0000 UTC m=+92.774328785" watchObservedRunningTime="2026-01-27 12:27:45.539383015 +0000 UTC m=+92.776411225" Jan 27 12:27:45 crc kubenswrapper[4900]: I0127 12:27:45.849376 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:45 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:45 crc kubenswrapper[4900]: 
[+]process-running ok Jan 27 12:27:45 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:45 crc kubenswrapper[4900]: I0127 12:27:45.849942 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.048933 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:46 crc kubenswrapper[4900]: E0127 12:27:46.091429 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:27:46 crc kubenswrapper[4900]: E0127 12:27:46.093200 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:27:46 crc kubenswrapper[4900]: E0127 12:27:46.094703 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:27:46 crc kubenswrapper[4900]: E0127 12:27:46.094740 4900 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.181949 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxrsb\" (UniqueName: \"kubernetes.io/projected/ea673ed9-5530-4b5d-8997-403f903d27a6-kube-api-access-zxrsb\") pod \"ea673ed9-5530-4b5d-8997-403f903d27a6\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.182027 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea673ed9-5530-4b5d-8997-403f903d27a6-secret-volume\") pod \"ea673ed9-5530-4b5d-8997-403f903d27a6\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.182283 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea673ed9-5530-4b5d-8997-403f903d27a6-config-volume\") pod \"ea673ed9-5530-4b5d-8997-403f903d27a6\" (UID: \"ea673ed9-5530-4b5d-8997-403f903d27a6\") " Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.182816 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea673ed9-5530-4b5d-8997-403f903d27a6-config-volume" (OuterVolumeSpecName: "config-volume") pod "ea673ed9-5530-4b5d-8997-403f903d27a6" 
(UID: "ea673ed9-5530-4b5d-8997-403f903d27a6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.188594 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea673ed9-5530-4b5d-8997-403f903d27a6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ea673ed9-5530-4b5d-8997-403f903d27a6" (UID: "ea673ed9-5530-4b5d-8997-403f903d27a6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.188658 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea673ed9-5530-4b5d-8997-403f903d27a6-kube-api-access-zxrsb" (OuterVolumeSpecName: "kube-api-access-zxrsb") pod "ea673ed9-5530-4b5d-8997-403f903d27a6" (UID: "ea673ed9-5530-4b5d-8997-403f903d27a6"). InnerVolumeSpecName "kube-api-access-zxrsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.284406 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea673ed9-5530-4b5d-8997-403f903d27a6-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.284483 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxrsb\" (UniqueName: \"kubernetes.io/projected/ea673ed9-5530-4b5d-8997-403f903d27a6-kube-api-access-zxrsb\") on node \"crc\" DevicePath \"\"" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.284499 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea673ed9-5530-4b5d-8997-403f903d27a6-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.804142 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" event={"ID":"96450fdc-fc86-4fc2-a04d-cfe29f04aa18","Type":"ContainerStarted","Data":"40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7"} Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.806175 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.809522 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.810911 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9" event={"ID":"ea673ed9-5530-4b5d-8997-403f903d27a6","Type":"ContainerDied","Data":"3f33329316be21f2d3e524bd5d9a494cca94438625a223a05e80175877036d27"} Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.810970 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f33329316be21f2d3e524bd5d9a494cca94438625a223a05e80175877036d27" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.843206 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:46 crc kubenswrapper[4900]: [-]has-synced failed: reason withheld Jan 27 12:27:46 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:46 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.843285 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:46 crc kubenswrapper[4900]: I0127 12:27:46.863864 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" podStartSLOduration=55.863838342 podStartE2EDuration="55.863838342s" podCreationTimestamp="2026-01-27 12:26:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:27:46.860558068 +0000 UTC m=+94.097586278" watchObservedRunningTime="2026-01-27 12:27:46.863838342 +0000 UTC m=+94.100866552" Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.271498 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.437016 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kube-api-access\") pod \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\" (UID: \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\") " Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.437154 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kubelet-dir\") pod \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\" (UID: \"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b\") " Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.437834 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b" (UID: "1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.438789 4900 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.450599 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b" (UID: "1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.540586 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.828782 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b","Type":"ContainerDied","Data":"590fa4f0926bd0243f0e91762bd13adc6d206d16f7464d15f60c93c67c467a3a"} Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.828841 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.828867 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="590fa4f0926bd0243f0e91762bd13adc6d206d16f7464d15f60c93c67c467a3a" Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.845023 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 27 12:27:47 crc kubenswrapper[4900]: [+]has-synced ok Jan 27 12:27:47 crc kubenswrapper[4900]: [+]process-running ok Jan 27 12:27:47 crc kubenswrapper[4900]: healthz check failed Jan 27 12:27:47 crc kubenswrapper[4900]: I0127 12:27:47.845133 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 12:27:48 crc kubenswrapper[4900]: I0127 12:27:48.852581 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:48 crc kubenswrapper[4900]: I0127 12:27:48.876722 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.568961 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.569374 4900 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.570728 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.587242 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.812532 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.875449 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.875528 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.882439 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.885207 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.994538 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:53 crc kubenswrapper[4900]: I0127 12:27:53.999126 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-5zbpj" Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.101718 4900 util.go:30] "No sandbox for 
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.101718 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.120205 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.120619 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.120371 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.120717 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-zbp4l"
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.120779 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.122456 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.122499 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.123229 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"4c2a78bc02f54661dc21c1c5a98c1a4ed695bc4feb4be9ce0990c10fee1a9331"} pod="openshift-console/downloads-7954f5f757-zbp4l" containerMessage="Container download-server failed liveness probe, will be restarted"
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.123417 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" containerID="cri-o://4c2a78bc02f54661dc21c1c5a98c1a4ed695bc4feb4be9ce0990c10fee1a9331" gracePeriod=2
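
The records above show HTTP liveness and readiness probes for openshift-console/downloads-7954f5f757-zbp4l failing against http://10.217.0.14:8080/, and the kubelet then killing the container with a 2-second grace period because the liveness probe failed. The pod's spec is not part of this log, so the following Go sketch of how such probes are declared (using k8s.io/api/core/v1) is illustrative; the path and port come from the log, the timing values are assumptions:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Probes matching the logged failure output ("Get http://10.217.0.14:8080/").
// Path and port are taken from the log; PeriodSeconds/FailureThreshold are
// assumed values for illustration only.
func downloadServerProbes() (liveness, readiness *corev1.Probe) {
	httpGet := &corev1.HTTPGetAction{
		Path: "/",
		Port: intstr.FromInt(8080),
	}
	liveness = &corev1.Probe{
		ProbeHandler:     corev1.ProbeHandler{HTTPGet: httpGet},
		PeriodSeconds:    10, // assumed; the logged failures recur at ~10s intervals
		FailureThreshold: 3,  // assumed; exceeding it triggers the kill seen above
	}
	readiness = &corev1.Probe{
		ProbeHandler:  corev1.ProbeHandler{HTTPGet: httpGet},
		PeriodSeconds: 10, // assumed
	}
	return liveness, readiness
}
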
Jan 27 12:27:54 crc kubenswrapper[4900]: I0127 12:27:54.199835 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 27 12:27:55 crc kubenswrapper[4900]: I0127 12:27:55.122783 4900 generic.go:334] "Generic (PLEG): container finished" podID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerID="4c2a78bc02f54661dc21c1c5a98c1a4ed695bc4feb4be9ce0990c10fee1a9331" exitCode=0
Jan 27 12:27:55 crc kubenswrapper[4900]: I0127 12:27:55.122856 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-zbp4l" event={"ID":"75d8fa11-eb06-4aae-8e96-3bb4328d69d7","Type":"ContainerDied","Data":"4c2a78bc02f54661dc21c1c5a98c1a4ed695bc4feb4be9ce0990c10fee1a9331"}
Jan 27 12:27:56 crc kubenswrapper[4900]: E0127 12:27:56.095488 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:27:56 crc kubenswrapper[4900]: E0127 12:27:56.112465 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:27:56 crc kubenswrapper[4900]: E0127 12:27:56.115309 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:27:56 crc kubenswrapper[4900]: E0127 12:27:56.115378 4900 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins"
Jan 27 12:28:04 crc kubenswrapper[4900]: I0127 12:28:04.119120 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 27 12:28:04 crc kubenswrapper[4900]: I0127 12:28:04.120209 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 27 12:28:04 crc kubenswrapper[4900]: I0127 12:28:04.941174 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz"
Jan 27 12:28:05 crc kubenswrapper[4900]: I0127 12:28:05.935890 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz"
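
The repeated ExecSync errors are the kubelet's exec readiness probe for kube-multus-additional-cni-plugins: it runs /bin/bash -c "test -f /ready/ready" inside the container, and once the container is stopping the runtime can no longer register the exec, so every attempt errors. The probe command is verbatim from the log; a minimal Go sketch of an equivalent exec readiness probe follows, with the period an assumption inferred from the errors recurring roughly every ten seconds (12:27:56, 12:28:06, 12:28:16, ...):

package sketch

import corev1 "k8s.io/api/core/v1"

// Exec readiness probe equivalent to the cmd shown in the log:
// ["/bin/bash","-c","test -f /ready/ready"].
func multusReadinessProbe() *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			Exec: &corev1.ExecAction{
				Command: []string{"/bin/bash", "-c", "test -f /ready/ready"},
			},
		},
		PeriodSeconds: 10, // assumed from the ~10s cadence of the logged errors
	}
}
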
Jan 27 12:28:06 crc kubenswrapper[4900]: E0127 12:28:06.094920 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:06 crc kubenswrapper[4900]: E0127 12:28:06.096202 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:06 crc kubenswrapper[4900]: E0127 12:28:06.098175 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:06 crc kubenswrapper[4900]: E0127 12:28:06.098216 4900 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins"
Jan 27 12:28:08 crc kubenswrapper[4900]: I0127 12:28:08.319038 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-clhb8_1a5d7b57-de98-445e-83b6-1ff0eb859e01/kube-multus-additional-cni-plugins/0.log"
Jan 27 12:28:08 crc kubenswrapper[4900]: I0127 12:28:08.319727 4900 generic.go:334] "Generic (PLEG): container finished" podID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" exitCode=137
Jan 27 12:28:08 crc kubenswrapper[4900]: I0127 12:28:08.319788 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" event={"ID":"1a5d7b57-de98-445e-83b6-1ff0eb859e01","Type":"ContainerDied","Data":"29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466"}
Jan 27 12:28:08 crc kubenswrapper[4900]: I0127 12:28:08.577018 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Jan 27 12:28:14 crc kubenswrapper[4900]: I0127 12:28:14.118667 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 27 12:28:14 crc kubenswrapper[4900]: I0127 12:28:14.119020 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 27 12:28:16 crc kubenswrapper[4900]: E0127 12:28:16.089590 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:16 crc kubenswrapper[4900]: E0127 12:28:16.090743 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:16 crc kubenswrapper[4900]: E0127 12:28:16.091129 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:16 crc kubenswrapper[4900]: E0127 12:28:16.091189 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins"
Jan 27 12:28:16 crc kubenswrapper[4900]: I0127 12:28:16.516736 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=8.516702307 podStartE2EDuration="8.516702307s" podCreationTimestamp="2026-01-27 12:28:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:28:16.516204933 +0000 UTC m=+123.753233183" watchObservedRunningTime="2026-01-27 12:28:16.516702307 +0000 UTC m=+123.753730517"
Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.429969 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 27 12:28:17 crc kubenswrapper[4900]: E0127 12:28:17.430406 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b" containerName="pruner"
Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.430431 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b" containerName="pruner"
Jan 27 12:28:17 crc kubenswrapper[4900]: E0127 12:28:17.430458 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea673ed9-5530-4b5d-8997-403f903d27a6" containerName="collect-profiles"
Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.430500 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea673ed9-5530-4b5d-8997-403f903d27a6" containerName="collect-profiles"
Jan 27 12:28:17 crc kubenswrapper[4900]: E0127 12:28:17.430518 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="570146c6-131d-43b7-90f3-93ad768ca7ca" containerName="pruner"
Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.430528 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="570146c6-131d-43b7-90f3-93ad768ca7ca" containerName="pruner"
Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.430711 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea673ed9-5530-4b5d-8997-403f903d27a6" containerName="collect-profiles"
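
The "Observed pod startup duration" record above for openshift-etcd/etcd-crc is straightforward arithmetic: podStartSLOduration is watchObservedRunningTime minus podCreationTimestamp, i.e. 12:28:16.516702307 - 12:28:08 = 8.516702307s, and image pulls contribute nothing here since both pull timestamps are the zero value. A small Go sketch reproducing the computation from the logged values:

package sketch

import "time"

// Reproduces podStartSLOduration from the record above:
// watchObservedRunningTime - podCreationTimestamp.
func podStartSLODuration() time.Duration {
	created, _ := time.Parse(time.RFC3339, "2026-01-27T12:28:08Z")
	running, _ := time.Parse(time.RFC3339Nano, "2026-01-27T12:28:16.516702307Z")
	return running.Sub(created) // 8.516702307s, matching the log
}
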
podUID="1c42c0b8-1f2b-4333-a3e6-cfe917b00d5b" containerName="pruner" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.430769 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="570146c6-131d-43b7-90f3-93ad768ca7ca" containerName="pruner" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.431404 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.435892 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.436269 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.439815 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.446309 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a207d82c-195b-44f5-bca9-fa0d029efa8a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a207d82c-195b-44f5-bca9-fa0d029efa8a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.446355 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a207d82c-195b-44f5-bca9-fa0d029efa8a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a207d82c-195b-44f5-bca9-fa0d029efa8a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.548682 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a207d82c-195b-44f5-bca9-fa0d029efa8a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a207d82c-195b-44f5-bca9-fa0d029efa8a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.548807 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a207d82c-195b-44f5-bca9-fa0d029efa8a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a207d82c-195b-44f5-bca9-fa0d029efa8a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.549162 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a207d82c-195b-44f5-bca9-fa0d029efa8a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a207d82c-195b-44f5-bca9-fa0d029efa8a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.575020 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a207d82c-195b-44f5-bca9-fa0d029efa8a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a207d82c-195b-44f5-bca9-fa0d029efa8a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.746927 4900 util.go:30] "No sandbox for pod can be found. 
Jan 27 12:28:17 crc kubenswrapper[4900]: I0127 12:28:17.746927 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 27 12:28:22 crc kubenswrapper[4900]: I0127 12:28:22.826038 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 27 12:28:22 crc kubenswrapper[4900]: I0127 12:28:22.830784 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 27 12:28:22 crc kubenswrapper[4900]: I0127 12:28:22.841564 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 27 12:28:22 crc kubenswrapper[4900]: I0127 12:28:22.926021 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-var-lock\") pod \"installer-9-crc\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 27 12:28:22 crc kubenswrapper[4900]: I0127 12:28:22.926138 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-kubelet-dir\") pod \"installer-9-crc\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 27 12:28:22 crc kubenswrapper[4900]: I0127 12:28:22.926171 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/465537a5-84fe-4c45-ac00-337055e94686-kube-api-access\") pod \"installer-9-crc\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 27 12:28:23 crc kubenswrapper[4900]: I0127 12:28:23.028700 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-var-lock\") pod \"installer-9-crc\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 27 12:28:23 crc kubenswrapper[4900]: I0127 12:28:23.028789 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-kubelet-dir\") pod \"installer-9-crc\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 27 12:28:23 crc kubenswrapper[4900]: I0127 12:28:23.028839 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/465537a5-84fe-4c45-ac00-337055e94686-kube-api-access\") pod \"installer-9-crc\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 27 12:28:23 crc kubenswrapper[4900]: I0127 12:28:23.029437 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-var-lock\") pod \"installer-9-crc\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc"
\"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 12:28:23 crc kubenswrapper[4900]: I0127 12:28:23.056934 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/465537a5-84fe-4c45-ac00-337055e94686-kube-api-access\") pod \"installer-9-crc\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 27 12:28:23 crc kubenswrapper[4900]: I0127 12:28:23.167957 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 27 12:28:24 crc kubenswrapper[4900]: I0127 12:28:24.124379 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:28:24 crc kubenswrapper[4900]: I0127 12:28:24.124462 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:28:26 crc kubenswrapper[4900]: E0127 12:28:26.090366 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:28:26 crc kubenswrapper[4900]: E0127 12:28:26.091392 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:28:26 crc kubenswrapper[4900]: E0127 12:28:26.092002 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:28:26 crc kubenswrapper[4900]: E0127 12:28:26.092078 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins" Jan 27 12:28:27 crc kubenswrapper[4900]: E0127 12:28:27.664683 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage257222855/2\": happened during read: context canceled" 
image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 27 12:28:27 crc kubenswrapper[4900]: E0127 12:28:27.665610 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7cdzt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-p7sss_openshift-marketplace(160d7119-622e-4998-a7f9-7f6a362cb5a2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \"/var/tmp/container_images_storage257222855/2\": happened during read: context canceled" logger="UnhandledError" Jan 27 12:28:27 crc kubenswrapper[4900]: E0127 12:28:27.666898 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: writing blob: storing blob to file \\\"/var/tmp/container_images_storage257222855/2\\\": happened during read: context canceled\"" pod="openshift-marketplace/redhat-operators-p7sss" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" Jan 27 12:28:34 crc kubenswrapper[4900]: I0127 12:28:34.118181 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:28:34 crc kubenswrapper[4900]: I0127 12:28:34.118811 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:28:36 crc kubenswrapper[4900]: E0127 12:28:36.089493 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is 
Jan 27 12:28:36 crc kubenswrapper[4900]: E0127 12:28:36.089493 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:36 crc kubenswrapper[4900]: E0127 12:28:36.090697 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:36 crc kubenswrapper[4900]: E0127 12:28:36.091672 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:36 crc kubenswrapper[4900]: E0127 12:28:36.091839 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins"
Jan 27 12:28:40 crc kubenswrapper[4900]: E0127 12:28:40.834804 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 27 12:28:40 crc kubenswrapper[4900]: E0127 12:28:40.835444 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qzvjg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-5jbr6_openshift-marketplace(6cc0d6e5-4b91-4226-99a2-165e8bea6a7d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 27 12:28:40 crc kubenswrapper[4900]: E0127 12:28:40.836703 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-5jbr6" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d"
Jan 27 12:28:44 crc kubenswrapper[4900]: I0127 12:28:44.117743 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 27 12:28:44 crc kubenswrapper[4900]: I0127 12:28:44.118202 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 27 12:28:45 crc kubenswrapper[4900]: E0127 12:28:45.636955 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
logger="UnhandledError" Jan 27 12:28:45 crc kubenswrapper[4900]: E0127 12:28:45.638368 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-r6fg7" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" Jan 27 12:28:46 crc kubenswrapper[4900]: E0127 12:28:46.089638 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:28:46 crc kubenswrapper[4900]: E0127 12:28:46.090361 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:28:46 crc kubenswrapper[4900]: E0127 12:28:46.090723 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 27 12:28:46 crc kubenswrapper[4900]: E0127 12:28:46.090771 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins" Jan 27 12:28:46 crc kubenswrapper[4900]: E0127 12:28:46.638362 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-r6fg7" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" Jan 27 12:28:46 crc kubenswrapper[4900]: E0127 12:28:46.638396 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-5jbr6" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" Jan 27 12:28:50 crc kubenswrapper[4900]: E0127 12:28:50.355430 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 27 12:28:50 crc kubenswrapper[4900]: E0127 12:28:50.356501 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wn6nj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-grrcf_openshift-marketplace(754ddb47-7891-4142-b52c-98a29ca40078): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 12:28:50 crc kubenswrapper[4900]: E0127 12:28:50.357766 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-grrcf" podUID="754ddb47-7891-4142-b52c-98a29ca40078" Jan 27 12:28:54 crc kubenswrapper[4900]: I0127 12:28:54.118161 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:28:54 crc kubenswrapper[4900]: I0127 12:28:54.118743 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:28:55 crc kubenswrapper[4900]: E0127 12:28:55.619546 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-grrcf" podUID="754ddb47-7891-4142-b52c-98a29ca40078" Jan 27 12:28:55 crc kubenswrapper[4900]: E0127 12:28:55.689138 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 27 12:28:55 crc kubenswrapper[4900]: E0127 12:28:55.689721 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fptvd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-n9dph_openshift-marketplace(79e4aadf-f1cf-44f9-af87-a3a4b1e06c57): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 12:28:55 crc kubenswrapper[4900]: E0127 12:28:55.691234 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-n9dph" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" Jan 27 12:28:55 crc kubenswrapper[4900]: E0127 12:28:55.846729 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 27 12:28:55 crc kubenswrapper[4900]: E0127 12:28:55.847590 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
Jan 27 12:28:55 crc kubenswrapper[4900]: E0127 12:28:55.847590 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-spv6t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-9s4cn_openshift-marketplace(93a3d7a3-dddf-49e3-be5b-5369108a5e13): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 27 12:28:55 crc kubenswrapper[4900]: E0127 12:28:55.848827 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-9s4cn" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13"
Jan 27 12:28:56 crc kubenswrapper[4900]: E0127 12:28:56.088792 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:56 crc kubenswrapper[4900]: E0127 12:28:56.089575 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:56 crc kubenswrapper[4900]: E0127 12:28:56.090314 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 27 12:28:56 crc kubenswrapper[4900]: E0127 12:28:56.090449 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466 is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins"
Jan 27 12:28:57 crc kubenswrapper[4900]: E0127 12:28:57.502695 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-9s4cn" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13"
Jan 27 12:28:57 crc kubenswrapper[4900]: E0127 12:28:57.502799 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-n9dph" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57"
Jan 27 12:28:57 crc kubenswrapper[4900]: E0127 12:28:57.562397 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Jan 27 12:28:57 crc kubenswrapper[4900]: E0127 12:28:57.562711 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8582r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-q5sf2_openshift-marketplace(c7ff526b-ed0d-42d2-98bf-2f237d76cbf4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 27 12:28:57 crc kubenswrapper[4900]: E0127 12:28:57.563841 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-q5sf2" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4"
Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.645326 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-clhb8_1a5d7b57-de98-445e-83b6-1ff0eb859e01/kube-multus-additional-cni-plugins/0.log"
Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.645444 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8"
Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.743098 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/1a5d7b57-de98-445e-83b6-1ff0eb859e01-ready\") pod \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") "
Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.744181 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a5d7b57-de98-445e-83b6-1ff0eb859e01-ready" (OuterVolumeSpecName: "ready") pod "1a5d7b57-de98-445e-83b6-1ff0eb859e01" (UID: "1a5d7b57-de98-445e-83b6-1ff0eb859e01"). InnerVolumeSpecName "ready". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.744728 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fq8k\" (UniqueName: \"kubernetes.io/projected/1a5d7b57-de98-445e-83b6-1ff0eb859e01-kube-api-access-8fq8k\") pod \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") "
Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.744950 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1a5d7b57-de98-445e-83b6-1ff0eb859e01-tuning-conf-dir\") pod \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") "
Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.745011 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1a5d7b57-de98-445e-83b6-1ff0eb859e01-cni-sysctl-allowlist\") pod \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\" (UID: \"1a5d7b57-de98-445e-83b6-1ff0eb859e01\") "
Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.745183 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a5d7b57-de98-445e-83b6-1ff0eb859e01-tuning-conf-dir" (OuterVolumeSpecName: "tuning-conf-dir") pod "1a5d7b57-de98-445e-83b6-1ff0eb859e01" (UID: "1a5d7b57-de98-445e-83b6-1ff0eb859e01"). InnerVolumeSpecName "tuning-conf-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.746805 4900 reconciler_common.go:293] "Volume detached for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1a5d7b57-de98-445e-83b6-1ff0eb859e01-tuning-conf-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.746829 4900 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1a5d7b57-de98-445e-83b6-1ff0eb859e01-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.746843 4900 reconciler_common.go:293] "Volume detached for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/1a5d7b57-de98-445e-83b6-1ff0eb859e01-ready\") on node \"crc\" DevicePath \"\"" Jan 27 12:28:57 crc kubenswrapper[4900]: E0127 12:28:57.762216 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 27 12:28:57 crc kubenswrapper[4900]: E0127 12:28:57.762398 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rkh6z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-jvcrt_openshift-marketplace(86867c21-fd52-44e7-85d2-87da48322397): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.762888 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a5d7b57-de98-445e-83b6-1ff0eb859e01-kube-api-access-8fq8k" (OuterVolumeSpecName: "kube-api-access-8fq8k") pod "1a5d7b57-de98-445e-83b6-1ff0eb859e01" (UID: "1a5d7b57-de98-445e-83b6-1ff0eb859e01"). InnerVolumeSpecName "kube-api-access-8fq8k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:28:57 crc kubenswrapper[4900]: E0127 12:28:57.763695 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-jvcrt" podUID="86867c21-fd52-44e7-85d2-87da48322397" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.802035 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-clhb8_1a5d7b57-de98-445e-83b6-1ff0eb859e01/kube-multus-additional-cni-plugins/0.log" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.803646 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.803679 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-clhb8" event={"ID":"1a5d7b57-de98-445e-83b6-1ff0eb859e01","Type":"ContainerDied","Data":"028e3f1d56981b93660996246759f07b7acb59134e8d4a7fc20c36dd7b477781"} Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.803797 4900 scope.go:117] "RemoveContainer" containerID="29d9e90315ee0d93f462693d935be5a3c005de92b4c24195a2bb8b4ae4379466" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.848410 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fq8k\" (UniqueName: \"kubernetes.io/projected/1a5d7b57-de98-445e-83b6-1ff0eb859e01-kube-api-access-8fq8k\") on node \"crc\" DevicePath \"\"" Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.863871 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-clhb8"] Jan 27 12:28:57 crc kubenswrapper[4900]: I0127 12:28:57.867827 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-clhb8"] Jan 27 12:28:58 crc kubenswrapper[4900]: I0127 12:28:58.493557 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" path="/var/lib/kubelet/pods/1a5d7b57-de98-445e-83b6-1ff0eb859e01/volumes" Jan 27 12:28:59 crc kubenswrapper[4900]: E0127 12:28:59.012860 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-jvcrt" podUID="86867c21-fd52-44e7-85d2-87da48322397" Jan 27 12:28:59 crc kubenswrapper[4900]: W0127 12:28:59.013095 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-a6886a8c136a22a28193ce8eae2107c6b2741c3ec08b2936b7580df2b029421b WatchSource:0}: Error finding container a6886a8c136a22a28193ce8eae2107c6b2741c3ec08b2936b7580df2b029421b: Status 404 returned error can't find the container with id a6886a8c136a22a28193ce8eae2107c6b2741c3ec08b2936b7580df2b029421b Jan 27 12:28:59 crc kubenswrapper[4900]: W0127 12:28:59.016492 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-3c266b18598dc79c2a8e0da202204f0c66a8e49e089a78ab441690be6fbff367 WatchSource:0}: Error 
finding container 3c266b18598dc79c2a8e0da202204f0c66a8e49e089a78ab441690be6fbff367: Status 404 returned error can't find the container with id 3c266b18598dc79c2a8e0da202204f0c66a8e49e089a78ab441690be6fbff367 Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.603400 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.691820 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 27 12:28:59 crc kubenswrapper[4900]: W0127 12:28:59.734799 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod465537a5_84fe_4c45_ac00_337055e94686.slice/crio-55f63688c2a95c666c8c1a615faff1d4b8654921d57ea2aa1dfc5095bbb8d4f0 WatchSource:0}: Error finding container 55f63688c2a95c666c8c1a615faff1d4b8654921d57ea2aa1dfc5095bbb8d4f0: Status 404 returned error can't find the container with id 55f63688c2a95c666c8c1a615faff1d4b8654921d57ea2aa1dfc5095bbb8d4f0 Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.817858 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"3c266b18598dc79c2a8e0da202204f0c66a8e49e089a78ab441690be6fbff367"} Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.820579 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"246e1073853af68dc31d8315e1883691b3765157830c4ee13a38765be972864e"} Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.824635 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-zbp4l" event={"ID":"75d8fa11-eb06-4aae-8e96-3bb4328d69d7","Type":"ContainerStarted","Data":"c5de183b802fabbd9cd34387612b723ecf2d6162c246c097abe251324a00062e"} Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.826252 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-zbp4l" Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.826407 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.826462 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.830026 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"465537a5-84fe-4c45-ac00-337055e94686","Type":"ContainerStarted","Data":"55f63688c2a95c666c8c1a615faff1d4b8654921d57ea2aa1dfc5095bbb8d4f0"} Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.840641 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" 
event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"a6886a8c136a22a28193ce8eae2107c6b2741c3ec08b2936b7580df2b029421b"} Jan 27 12:28:59 crc kubenswrapper[4900]: I0127 12:28:59.842315 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a207d82c-195b-44f5-bca9-fa0d029efa8a","Type":"ContainerStarted","Data":"16eddfb2d89886831cd78e1a5cbcd584f4a38957d619abf93828bdf38b549ec1"} Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.972412 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"f7b23403ecf26e3d25cc38bdc1e7a5621d205f11c0037a331f5a40aa19d52529"} Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.976568 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a207d82c-195b-44f5-bca9-fa0d029efa8a","Type":"ContainerStarted","Data":"65f746f782a5ceaee59e3893d1135bd661250504dcf51ce901936bc289544fac"} Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.978795 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"3c458a82df63416d975a4232de4fe3bba6f48cab9b24864282ef6bb0471a07d5"} Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.979018 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.982019 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"aba5987295300d4c964275e2b3b61b1544ddfd362666b4adda4f26265f92521e"} Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.986849 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7sss" event={"ID":"160d7119-622e-4998-a7f9-7f6a362cb5a2","Type":"ContainerStarted","Data":"08c164c796070d9ba052292f661a3402a799cdba6ee63ac080903f2dc7069b77"} Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.992266 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"465537a5-84fe-4c45-ac00-337055e94686","Type":"ContainerStarted","Data":"d775daefb29bd206b1800a12a45d247fe9a22e554e11fd1689956a46956e13c9"} Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.993217 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:00 crc kubenswrapper[4900]: I0127 12:29:00.993311 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:01 crc kubenswrapper[4900]: I0127 12:29:01.116818 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" 
podStartSLOduration=39.116786851 podStartE2EDuration="39.116786851s" podCreationTimestamp="2026-01-27 12:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:29:01.114887185 +0000 UTC m=+168.351915395" watchObservedRunningTime="2026-01-27 12:29:01.116786851 +0000 UTC m=+168.353815061" Jan 27 12:29:02 crc kubenswrapper[4900]: I0127 12:29:02.002411 4900 generic.go:334] "Generic (PLEG): container finished" podID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerID="08c164c796070d9ba052292f661a3402a799cdba6ee63ac080903f2dc7069b77" exitCode=0 Jan 27 12:29:02 crc kubenswrapper[4900]: I0127 12:29:02.002556 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7sss" event={"ID":"160d7119-622e-4998-a7f9-7f6a362cb5a2","Type":"ContainerDied","Data":"08c164c796070d9ba052292f661a3402a799cdba6ee63ac080903f2dc7069b77"} Jan 27 12:29:02 crc kubenswrapper[4900]: I0127 12:29:02.005765 4900 generic.go:334] "Generic (PLEG): container finished" podID="a207d82c-195b-44f5-bca9-fa0d029efa8a" containerID="65f746f782a5ceaee59e3893d1135bd661250504dcf51ce901936bc289544fac" exitCode=0 Jan 27 12:29:02 crc kubenswrapper[4900]: I0127 12:29:02.005918 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a207d82c-195b-44f5-bca9-fa0d029efa8a","Type":"ContainerDied","Data":"65f746f782a5ceaee59e3893d1135bd661250504dcf51ce901936bc289544fac"} Jan 27 12:29:02 crc kubenswrapper[4900]: I0127 12:29:02.028359 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=45.028328429 podStartE2EDuration="45.028328429s" podCreationTimestamp="2026-01-27 12:28:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:29:01.163480227 +0000 UTC m=+168.400508467" watchObservedRunningTime="2026-01-27 12:29:02.028328429 +0000 UTC m=+169.265356639" Jan 27 12:29:03 crc kubenswrapper[4900]: I0127 12:29:03.016991 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r6fg7" event={"ID":"354400fa-dc6c-4435-b9cf-09b5e76a6ef2","Type":"ContainerStarted","Data":"0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006"} Jan 27 12:29:03 crc kubenswrapper[4900]: I0127 12:29:03.425807 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:29:03 crc kubenswrapper[4900]: I0127 12:29:03.603565 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a207d82c-195b-44f5-bca9-fa0d029efa8a-kubelet-dir\") pod \"a207d82c-195b-44f5-bca9-fa0d029efa8a\" (UID: \"a207d82c-195b-44f5-bca9-fa0d029efa8a\") " Jan 27 12:29:03 crc kubenswrapper[4900]: I0127 12:29:03.603746 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a207d82c-195b-44f5-bca9-fa0d029efa8a-kube-api-access\") pod \"a207d82c-195b-44f5-bca9-fa0d029efa8a\" (UID: \"a207d82c-195b-44f5-bca9-fa0d029efa8a\") " Jan 27 12:29:03 crc kubenswrapper[4900]: I0127 12:29:03.605235 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a207d82c-195b-44f5-bca9-fa0d029efa8a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a207d82c-195b-44f5-bca9-fa0d029efa8a" (UID: "a207d82c-195b-44f5-bca9-fa0d029efa8a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:29:03 crc kubenswrapper[4900]: I0127 12:29:03.612178 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a207d82c-195b-44f5-bca9-fa0d029efa8a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a207d82c-195b-44f5-bca9-fa0d029efa8a" (UID: "a207d82c-195b-44f5-bca9-fa0d029efa8a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:29:03 crc kubenswrapper[4900]: I0127 12:29:03.705704 4900 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a207d82c-195b-44f5-bca9-fa0d029efa8a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:03 crc kubenswrapper[4900]: I0127 12:29:03.705753 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a207d82c-195b-44f5-bca9-fa0d029efa8a-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.027401 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a207d82c-195b-44f5-bca9-fa0d029efa8a","Type":"ContainerDied","Data":"16eddfb2d89886831cd78e1a5cbcd584f4a38957d619abf93828bdf38b549ec1"} Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.027440 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.027464 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16eddfb2d89886831cd78e1a5cbcd584f4a38957d619abf93828bdf38b549ec1" Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.030096 4900 generic.go:334] "Generic (PLEG): container finished" podID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerID="0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006" exitCode=0 Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.030178 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r6fg7" event={"ID":"354400fa-dc6c-4435-b9cf-09b5e76a6ef2","Type":"ContainerDied","Data":"0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006"} Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.117898 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.117956 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.118543 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:04 crc kubenswrapper[4900]: I0127 12:29:04.118650 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:05 crc kubenswrapper[4900]: I0127 12:29:05.055767 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7sss" event={"ID":"160d7119-622e-4998-a7f9-7f6a362cb5a2","Type":"ContainerStarted","Data":"d341dc322a6f85bd23547584ac963eb852a6521b399f12d3100679f114a052de"} Jan 27 12:29:05 crc kubenswrapper[4900]: I0127 12:29:05.059282 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5jbr6" event={"ID":"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d","Type":"ContainerStarted","Data":"37a6cf8d126e1cf80dfdc457b082174a974bc7a23bcbf93b5bb30fe9835c5ec4"} Jan 27 12:29:05 crc kubenswrapper[4900]: I0127 12:29:05.082354 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p7sss" podStartSLOduration=5.658632532 podStartE2EDuration="1m26.082331364s" podCreationTimestamp="2026-01-27 12:27:39 +0000 UTC" firstStartedPulling="2026-01-27 12:27:43.3696644 +0000 UTC m=+90.606692610" lastFinishedPulling="2026-01-27 12:29:03.793363232 +0000 UTC m=+171.030391442" observedRunningTime="2026-01-27 12:29:05.078518301 +0000 UTC m=+172.315546511" watchObservedRunningTime="2026-01-27 12:29:05.082331364 +0000 UTC m=+172.319359564" Jan 27 
12:29:07 crc kubenswrapper[4900]: I0127 12:29:07.078344 4900 generic.go:334] "Generic (PLEG): container finished" podID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerID="37a6cf8d126e1cf80dfdc457b082174a974bc7a23bcbf93b5bb30fe9835c5ec4" exitCode=0 Jan 27 12:29:07 crc kubenswrapper[4900]: I0127 12:29:07.078436 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5jbr6" event={"ID":"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d","Type":"ContainerDied","Data":"37a6cf8d126e1cf80dfdc457b082174a974bc7a23bcbf93b5bb30fe9835c5ec4"} Jan 27 12:29:07 crc kubenswrapper[4900]: I0127 12:29:07.124148 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r6fg7" event={"ID":"354400fa-dc6c-4435-b9cf-09b5e76a6ef2","Type":"ContainerStarted","Data":"9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122"} Jan 27 12:29:07 crc kubenswrapper[4900]: I0127 12:29:07.152903 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-r6fg7" podStartSLOduration=6.415569407 podStartE2EDuration="1m29.152864865s" podCreationTimestamp="2026-01-27 12:27:38 +0000 UTC" firstStartedPulling="2026-01-27 12:27:42.175746721 +0000 UTC m=+89.412774931" lastFinishedPulling="2026-01-27 12:29:04.913042179 +0000 UTC m=+172.150070389" observedRunningTime="2026-01-27 12:29:07.150443373 +0000 UTC m=+174.387471583" watchObservedRunningTime="2026-01-27 12:29:07.152864865 +0000 UTC m=+174.389893075" Jan 27 12:29:09 crc kubenswrapper[4900]: I0127 12:29:09.001885 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-r6fg7" Jan 27 12:29:09 crc kubenswrapper[4900]: I0127 12:29:09.003093 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-r6fg7" Jan 27 12:29:10 crc kubenswrapper[4900]: I0127 12:29:10.340587 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-r6fg7" Jan 27 12:29:10 crc kubenswrapper[4900]: I0127 12:29:10.399117 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:29:10 crc kubenswrapper[4900]: I0127 12:29:10.401286 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:29:11 crc kubenswrapper[4900]: I0127 12:29:11.438604 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p7sss" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="registry-server" probeResult="failure" output=< Jan 27 12:29:11 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:29:11 crc kubenswrapper[4900]: > Jan 27 12:29:14 crc kubenswrapper[4900]: I0127 12:29:14.118026 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:14 crc kubenswrapper[4900]: I0127 12:29:14.118575 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: 
connect: connection refused" Jan 27 12:29:14 crc kubenswrapper[4900]: I0127 12:29:14.118673 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:14 crc kubenswrapper[4900]: I0127 12:29:14.118693 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:16 crc kubenswrapper[4900]: I0127 12:29:16.180330 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5jbr6" event={"ID":"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d","Type":"ContainerStarted","Data":"25efbcefc2590a349bf12405c0b630c392222346ca08e721c75c57c7e5b1938e"} Jan 27 12:29:19 crc kubenswrapper[4900]: I0127 12:29:19.070670 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-r6fg7" Jan 27 12:29:19 crc kubenswrapper[4900]: I0127 12:29:19.093085 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5jbr6" podStartSLOduration=8.974082818 podStartE2EDuration="1m41.093030095s" podCreationTimestamp="2026-01-27 12:27:38 +0000 UTC" firstStartedPulling="2026-01-27 12:27:42.172837758 +0000 UTC m=+89.409865968" lastFinishedPulling="2026-01-27 12:29:14.291785025 +0000 UTC m=+181.528813245" observedRunningTime="2026-01-27 12:29:17.202819366 +0000 UTC m=+184.439847586" watchObservedRunningTime="2026-01-27 12:29:19.093030095 +0000 UTC m=+186.330058315" Jan 27 12:29:19 crc kubenswrapper[4900]: I0127 12:29:19.305219 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5jbr6" Jan 27 12:29:19 crc kubenswrapper[4900]: I0127 12:29:19.305303 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5jbr6" Jan 27 12:29:19 crc kubenswrapper[4900]: I0127 12:29:19.352859 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5jbr6" Jan 27 12:29:20 crc kubenswrapper[4900]: I0127 12:29:20.241907 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5jbr6" Jan 27 12:29:20 crc kubenswrapper[4900]: I0127 12:29:20.305355 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5jbr6"] Jan 27 12:29:20 crc kubenswrapper[4900]: I0127 12:29:20.460687 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:29:20 crc kubenswrapper[4900]: I0127 12:29:20.505802 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:29:22 crc kubenswrapper[4900]: I0127 12:29:22.212255 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5jbr6" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerName="registry-server" containerID="cri-o://25efbcefc2590a349bf12405c0b630c392222346ca08e721c75c57c7e5b1938e" gracePeriod=2 Jan 27 
12:29:22 crc kubenswrapper[4900]: I0127 12:29:22.372836 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:29:22 crc kubenswrapper[4900]: I0127 12:29:22.372913 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:29:22 crc kubenswrapper[4900]: I0127 12:29:22.704207 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p7sss"] Jan 27 12:29:22 crc kubenswrapper[4900]: I0127 12:29:22.704914 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p7sss" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="registry-server" containerID="cri-o://d341dc322a6f85bd23547584ac963eb852a6521b399f12d3100679f114a052de" gracePeriod=2 Jan 27 12:29:23 crc kubenswrapper[4900]: I0127 12:29:23.222140 4900 generic.go:334] "Generic (PLEG): container finished" podID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerID="25efbcefc2590a349bf12405c0b630c392222346ca08e721c75c57c7e5b1938e" exitCode=0 Jan 27 12:29:23 crc kubenswrapper[4900]: I0127 12:29:23.222200 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5jbr6" event={"ID":"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d","Type":"ContainerDied","Data":"25efbcefc2590a349bf12405c0b630c392222346ca08e721c75c57c7e5b1938e"} Jan 27 12:29:23 crc kubenswrapper[4900]: I0127 12:29:23.857734 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5jbr6" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.012337 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzvjg\" (UniqueName: \"kubernetes.io/projected/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-kube-api-access-qzvjg\") pod \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.012527 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-catalog-content\") pod \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.012592 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-utilities\") pod \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\" (UID: \"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d\") " Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.014128 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-utilities" (OuterVolumeSpecName: "utilities") pod "6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" (UID: "6cc0d6e5-4b91-4226-99a2-165e8bea6a7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.021809 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-kube-api-access-qzvjg" (OuterVolumeSpecName: "kube-api-access-qzvjg") pod "6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" (UID: "6cc0d6e5-4b91-4226-99a2-165e8bea6a7d"). InnerVolumeSpecName "kube-api-access-qzvjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.037694 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" (UID: "6cc0d6e5-4b91-4226-99a2-165e8bea6a7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.114013 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzvjg\" (UniqueName: \"kubernetes.io/projected/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-kube-api-access-qzvjg\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.114081 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.114093 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.118584 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.118700 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.119673 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.119837 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.119949 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-zbp4l" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.121225 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: 
Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.121276 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.121427 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"c5de183b802fabbd9cd34387612b723ecf2d6162c246c097abe251324a00062e"} pod="openshift-console/downloads-7954f5f757-zbp4l" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.121503 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" containerID="cri-o://c5de183b802fabbd9cd34387612b723ecf2d6162c246c097abe251324a00062e" gracePeriod=2 Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.230528 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5jbr6" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.230527 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5jbr6" event={"ID":"6cc0d6e5-4b91-4226-99a2-165e8bea6a7d","Type":"ContainerDied","Data":"8e008a60254b604a2fc26d221acf28e522131876394847c966625038983010b0"} Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.230739 4900 scope.go:117] "RemoveContainer" containerID="25efbcefc2590a349bf12405c0b630c392222346ca08e721c75c57c7e5b1938e" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.238201 4900 generic.go:334] "Generic (PLEG): container finished" podID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerID="d341dc322a6f85bd23547584ac963eb852a6521b399f12d3100679f114a052de" exitCode=0 Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.238305 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7sss" event={"ID":"160d7119-622e-4998-a7f9-7f6a362cb5a2","Type":"ContainerDied","Data":"d341dc322a6f85bd23547584ac963eb852a6521b399f12d3100679f114a052de"} Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.268937 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5jbr6"] Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.273632 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5jbr6"] Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.325224 4900 scope.go:117] "RemoveContainer" containerID="37a6cf8d126e1cf80dfdc457b082174a974bc7a23bcbf93b5bb30fe9835c5ec4" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.344747 4900 scope.go:117] "RemoveContainer" containerID="21def6407c52b77ddd371a02e16da8659df5db996ee0b1c935dc1cad52d2ed8b" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.494274 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" path="/var/lib/kubelet/pods/6cc0d6e5-4b91-4226-99a2-165e8bea6a7d/volumes" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 
12:29:24.601382 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.723878 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-catalog-content\") pod \"160d7119-622e-4998-a7f9-7f6a362cb5a2\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.724147 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-utilities\") pod \"160d7119-622e-4998-a7f9-7f6a362cb5a2\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.724284 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cdzt\" (UniqueName: \"kubernetes.io/projected/160d7119-622e-4998-a7f9-7f6a362cb5a2-kube-api-access-7cdzt\") pod \"160d7119-622e-4998-a7f9-7f6a362cb5a2\" (UID: \"160d7119-622e-4998-a7f9-7f6a362cb5a2\") " Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.726263 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-utilities" (OuterVolumeSpecName: "utilities") pod "160d7119-622e-4998-a7f9-7f6a362cb5a2" (UID: "160d7119-622e-4998-a7f9-7f6a362cb5a2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.731873 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/160d7119-622e-4998-a7f9-7f6a362cb5a2-kube-api-access-7cdzt" (OuterVolumeSpecName: "kube-api-access-7cdzt") pod "160d7119-622e-4998-a7f9-7f6a362cb5a2" (UID: "160d7119-622e-4998-a7f9-7f6a362cb5a2"). InnerVolumeSpecName "kube-api-access-7cdzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.826914 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cdzt\" (UniqueName: \"kubernetes.io/projected/160d7119-622e-4998-a7f9-7f6a362cb5a2-kube-api-access-7cdzt\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:24 crc kubenswrapper[4900]: I0127 12:29:24.826972 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.249745 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p7sss" event={"ID":"160d7119-622e-4998-a7f9-7f6a362cb5a2","Type":"ContainerDied","Data":"c1f0d563e35e049fb8e4970ee52247b88361de822499469d1688ff96bbc55c95"} Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.249817 4900 scope.go:117] "RemoveContainer" containerID="d341dc322a6f85bd23547584ac963eb852a6521b399f12d3100679f114a052de" Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.249770 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p7sss" Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.254781 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrcf" event={"ID":"754ddb47-7891-4142-b52c-98a29ca40078","Type":"ContainerStarted","Data":"902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e"} Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.270562 4900 scope.go:117] "RemoveContainer" containerID="08c164c796070d9ba052292f661a3402a799cdba6ee63ac080903f2dc7069b77" Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.288857 4900 scope.go:117] "RemoveContainer" containerID="7e78daa90011f87779c591f3167107cc00ef05528e67ef1fd7f2c517b30b6f33" Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.333342 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "160d7119-622e-4998-a7f9-7f6a362cb5a2" (UID: "160d7119-622e-4998-a7f9-7f6a362cb5a2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.335099 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/160d7119-622e-4998-a7f9-7f6a362cb5a2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.605717 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p7sss"] Jan 27 12:29:25 crc kubenswrapper[4900]: I0127 12:29:25.610861 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p7sss"] Jan 27 12:29:26 crc kubenswrapper[4900]: I0127 12:29:26.262704 4900 generic.go:334] "Generic (PLEG): container finished" podID="754ddb47-7891-4142-b52c-98a29ca40078" containerID="902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e" exitCode=0 Jan 27 12:29:26 crc kubenswrapper[4900]: I0127 12:29:26.262774 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrcf" event={"ID":"754ddb47-7891-4142-b52c-98a29ca40078","Type":"ContainerDied","Data":"902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e"} Jan 27 12:29:26 crc kubenswrapper[4900]: I0127 12:29:26.268275 4900 generic.go:334] "Generic (PLEG): container finished" podID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerID="c5de183b802fabbd9cd34387612b723ecf2d6162c246c097abe251324a00062e" exitCode=0 Jan 27 12:29:26 crc kubenswrapper[4900]: I0127 12:29:26.268386 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-zbp4l" event={"ID":"75d8fa11-eb06-4aae-8e96-3bb4328d69d7","Type":"ContainerDied","Data":"c5de183b802fabbd9cd34387612b723ecf2d6162c246c097abe251324a00062e"} Jan 27 12:29:26 crc kubenswrapper[4900]: I0127 12:29:26.268470 4900 scope.go:117] "RemoveContainer" containerID="4c2a78bc02f54661dc21c1c5a98c1a4ed695bc4feb4be9ce0990c10fee1a9331" Jan 27 12:29:26 crc kubenswrapper[4900]: I0127 12:29:26.491380 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" path="/var/lib/kubelet/pods/160d7119-622e-4998-a7f9-7f6a362cb5a2/volumes" Jan 27 12:29:27 crc kubenswrapper[4900]: I0127 12:29:27.281118 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-console/downloads-7954f5f757-zbp4l" event={"ID":"75d8fa11-eb06-4aae-8e96-3bb4328d69d7","Type":"ContainerStarted","Data":"8624a91fa11ee5fbdce466ab359a95493d21154b974a7b4c26ee5aa95e700d57"} Jan 27 12:29:29 crc kubenswrapper[4900]: I0127 12:29:29.295594 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-zbp4l" Jan 27 12:29:29 crc kubenswrapper[4900]: I0127 12:29:29.296014 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:29 crc kubenswrapper[4900]: I0127 12:29:29.296048 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:30 crc kubenswrapper[4900]: I0127 12:29:30.298971 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:30 crc kubenswrapper[4900]: I0127 12:29:30.299401 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:34 crc kubenswrapper[4900]: I0127 12:29:34.118172 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:34 crc kubenswrapper[4900]: I0127 12:29:34.119158 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:34 crc kubenswrapper[4900]: I0127 12:29:34.119857 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:34 crc kubenswrapper[4900]: I0127 12:29:34.119881 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:34 crc kubenswrapper[4900]: I0127 12:29:34.267261 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 27 12:29:36 crc kubenswrapper[4900]: I0127 12:29:36.370436 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s4cn" 
event={"ID":"93a3d7a3-dddf-49e3-be5b-5369108a5e13","Type":"ContainerStarted","Data":"7cf8cc2e0939176c9937ce00b46e9a1c83d4e636719df6fdb77d87e33709bedc"} Jan 27 12:29:36 crc kubenswrapper[4900]: I0127 12:29:36.374174 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5sf2" event={"ID":"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4","Type":"ContainerStarted","Data":"e3af023d7adf7f632efb7e0fd6942143d3c6955f8bbac1a56d859414731e92ce"} Jan 27 12:29:37 crc kubenswrapper[4900]: I0127 12:29:37.388209 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jvcrt" event={"ID":"86867c21-fd52-44e7-85d2-87da48322397","Type":"ContainerStarted","Data":"d1ac6f9cb085f6dda71bea07763408defa033a038093cb9cb9fc11192b69f77b"} Jan 27 12:29:37 crc kubenswrapper[4900]: I0127 12:29:37.390249 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrcf" event={"ID":"754ddb47-7891-4142-b52c-98a29ca40078","Type":"ContainerStarted","Data":"e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1"} Jan 27 12:29:37 crc kubenswrapper[4900]: I0127 12:29:37.393667 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n9dph" event={"ID":"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57","Type":"ContainerStarted","Data":"df8c9ca11509273f21cb7cf634f7b7b2d013ce583237c9b00101d565d74afadc"} Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.618653 4900 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.619296 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a207d82c-195b-44f5-bca9-fa0d029efa8a" containerName="pruner" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619312 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a207d82c-195b-44f5-bca9-fa0d029efa8a" containerName="pruner" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.619332 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerName="extract-utilities" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619341 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerName="extract-utilities" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.619360 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619367 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.619377 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="extract-content" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619384 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="extract-content" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.619393 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerName="registry-server" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619400 4900 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerName="registry-server" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.619413 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="registry-server" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619419 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="registry-server" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.619425 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="extract-utilities" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619431 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="extract-utilities" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.619441 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerName="extract-content" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619448 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerName="extract-content" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619587 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cc0d6e5-4b91-4226-99a2-165e8bea6a7d" containerName="registry-server" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619601 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a5d7b57-de98-445e-83b6-1ff0eb859e01" containerName="kube-multus-additional-cni-plugins" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619610 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="160d7119-622e-4998-a7f9-7f6a362cb5a2" containerName="registry-server" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.619618 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a207d82c-195b-44f5-bca9-fa0d029efa8a" containerName="pruner" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.620036 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.620677 4900 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.621106 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc" gracePeriod=15 Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.621235 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d" gracePeriod=15 Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.621140 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://355e30f76b375b00057638f14bf0b41adf853c25aba5fc64ed7879e242b936c3" gracePeriod=15 Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.621329 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc" gracePeriod=15 Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.621420 4900 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.621335 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f" gracePeriod=15 Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.622082 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622206 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.622237 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622247 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.622287 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622299 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.622308 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622316 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.622328 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622362 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.622388 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622399 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.622417 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622479 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622735 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622759 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622773 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622785 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622835 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.622848 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 27 12:29:38 crc kubenswrapper[4900]: E0127 12:29:38.623192 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.623210 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.623478 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.717729 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.717792 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.717832 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.717852 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.717893 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.718239 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.718522 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.718612 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830253 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830369 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830434 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830461 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830504 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830530 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830576 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830623 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830757 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830834 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830873 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830917 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830951 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.830983 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.831019 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:38 crc kubenswrapper[4900]: I0127 12:29:38.831220 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.417547 4900 generic.go:334] "Generic (PLEG): container finished" podID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerID="e3af023d7adf7f632efb7e0fd6942143d3c6955f8bbac1a56d859414731e92ce" exitCode=0 Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.417638 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5sf2" event={"ID":"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4","Type":"ContainerDied","Data":"e3af023d7adf7f632efb7e0fd6942143d3c6955f8bbac1a56d859414731e92ce"} Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.419050 4900 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.419640 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.422415 4900 generic.go:334] "Generic (PLEG): container finished" podID="86867c21-fd52-44e7-85d2-87da48322397" containerID="d1ac6f9cb085f6dda71bea07763408defa033a038093cb9cb9fc11192b69f77b" exitCode=0 Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.422563 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jvcrt" event={"ID":"86867c21-fd52-44e7-85d2-87da48322397","Type":"ContainerDied","Data":"d1ac6f9cb085f6dda71bea07763408defa033a038093cb9cb9fc11192b69f77b"} Jan 27 12:29:39 crc kubenswrapper[4900]: E0127 12:29:39.422706 4900 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.220:6443: connect: connection refused" event="&Event{ObjectMeta:{community-operators-q5sf2.188e96514676226b openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:community-operators-q5sf2,UID:c7ff526b-ed0d-42d2-98bf-2f237d76cbf4,APIVersion:v1,ResourceVersion:28414,FieldPath:spec.containers{registry-server},},Reason:Pulling,Message:Pulling image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-27 12:29:39.421848171 +0000 UTC m=+206.658876381,LastTimestamp:2026-01-27 12:29:39.421848171 +0000 UTC m=+206.658876381,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.423978 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.424334 4900 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.424589 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.426481 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.428848 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.430850 4900 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f" exitCode=2 Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.439504 4900 generic.go:334] "Generic (PLEG): container finished" podID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerID="df8c9ca11509273f21cb7cf634f7b7b2d013ce583237c9b00101d565d74afadc" exitCode=0 Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.439844 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n9dph" event={"ID":"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57","Type":"ContainerDied","Data":"df8c9ca11509273f21cb7cf634f7b7b2d013ce583237c9b00101d565d74afadc"} Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.440504 4900 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.440708 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.441079 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.441543 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.441789 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.446533 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.447919 4900 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.448892 4900 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.449526 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.450300 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.450917 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.452177 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:39 crc kubenswrapper[4900]: E0127 12:29:39.545013 4900 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.220:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:39 crc kubenswrapper[4900]: I0127 12:29:39.545910 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.421064 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:29:40Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:29:40Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:29:40Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:29:40Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:024b1ed0676c2e11f6a319392c82e7acd0ceeae31ca00b202307c4d86a796b20\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:ada03173793960eaa0e4263282fcbf5af3dea8aaf2c3b0d864906108db062e8a\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1672061160},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:200a8645cdce6d8fa5a43098c67b88945e73bd2cae92ca7b61297d44fdc66978\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:fb0551b0a119afe949321afb7bfcae7fd008137fa5b974a8a4c36e3d937e8bce\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1201577188},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:2a894e1c38c447ab3aeadd069481fe8598e475279349518868fbba58861a9d20\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:b3efc392f71c68e5db92c84992e1e340a0fc167dd2cd8e110a6c23037738d467\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1185882355},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:6d91aecdb391dd0cbb56f2b6335674bd2b4a25c63f0b9e893ba8977a71be3c0d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:98739198606db13baf3fa39b12298669778a619dff80b9b5d51987d7f76056c9\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1180173538},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":100
1152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf
932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 
38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.422060 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.422557 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.422870 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.423371 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.423412 4900 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.450788 4900 generic.go:334] "Generic (PLEG): container finished" podID="465537a5-84fe-4c45-ac00-337055e94686" containerID="d775daefb29bd206b1800a12a45d247fe9a22e554e11fd1689956a46956e13c9" exitCode=0 Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.450902 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"465537a5-84fe-4c45-ac00-337055e94686","Type":"ContainerDied","Data":"d775daefb29bd206b1800a12a45d247fe9a22e554e11fd1689956a46956e13c9"} Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.452489 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.453283 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.453702 4900 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.454089 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: 
connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.454391 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.454625 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.455216 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"231c70fcfd8764a1b0c1cbd9a7a97637bbccaf8cc0871bb1eb7c28e0f5cec6d5"} Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.458734 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.461128 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.462039 4900 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="355e30f76b375b00057638f14bf0b41adf853c25aba5fc64ed7879e242b936c3" exitCode=0 Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.462119 4900 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc" exitCode=0 Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.462132 4900 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d" exitCode=0 Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.462196 4900 scope.go:117] "RemoveContainer" containerID="1029871e73677c51b87986b9f7dcbe59cfc8952c28efaea8f80014fbcfd8a78b" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.572252 4900 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.573369 4900 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.574377 4900 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.574838 4900 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.575219 4900 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:40 crc kubenswrapper[4900]: I0127 12:29:40.575262 4900 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.575615 4900 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" interval="200ms" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.777790 4900 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" interval="400ms" Jan 27 12:29:40 crc kubenswrapper[4900]: E0127 12:29:40.983414 4900 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.220:6443: connect: connection refused" event="&Event{ObjectMeta:{community-operators-q5sf2.188e96514676226b openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:community-operators-q5sf2,UID:c7ff526b-ed0d-42d2-98bf-2f237d76cbf4,APIVersion:v1,ResourceVersion:28414,FieldPath:spec.containers{registry-server},},Reason:Pulling,Message:Pulling image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-27 12:29:39.421848171 +0000 UTC m=+206.658876381,LastTimestamp:2026-01-27 12:29:39.421848171 +0000 UTC m=+206.658876381,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 27 12:29:41 crc kubenswrapper[4900]: E0127 12:29:41.180280 4900 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" interval="800ms" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.493180 4900 generic.go:334] "Generic (PLEG): container finished" podID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerID="7cf8cc2e0939176c9937ce00b46e9a1c83d4e636719df6fdb77d87e33709bedc" exitCode=0 Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.493420 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s4cn" event={"ID":"93a3d7a3-dddf-49e3-be5b-5369108a5e13","Type":"ContainerDied","Data":"7cf8cc2e0939176c9937ce00b46e9a1c83d4e636719df6fdb77d87e33709bedc"} Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.495968 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" 
pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.496836 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.497537 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.497790 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.498054 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.498312 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.500796 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8"} Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.510296 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.804332 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.805264 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.805728 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.805970 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.806293 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.806647 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.808564 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.958329 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-var-lock\") pod \"465537a5-84fe-4c45-ac00-337055e94686\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.958466 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/465537a5-84fe-4c45-ac00-337055e94686-kube-api-access\") pod \"465537a5-84fe-4c45-ac00-337055e94686\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.958577 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-kubelet-dir\") pod \"465537a5-84fe-4c45-ac00-337055e94686\" (UID: \"465537a5-84fe-4c45-ac00-337055e94686\") " Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.958745 4900 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-var-lock" (OuterVolumeSpecName: "var-lock") pod "465537a5-84fe-4c45-ac00-337055e94686" (UID: "465537a5-84fe-4c45-ac00-337055e94686"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.958838 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "465537a5-84fe-4c45-ac00-337055e94686" (UID: "465537a5-84fe-4c45-ac00-337055e94686"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.959579 4900 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.959619 4900 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/465537a5-84fe-4c45-ac00-337055e94686-var-lock\") on node \"crc\" DevicePath \"\""
Jan 27 12:29:41 crc kubenswrapper[4900]: I0127 12:29:41.967272 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/465537a5-84fe-4c45-ac00-337055e94686-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "465537a5-84fe-4c45-ac00-337055e94686" (UID: "465537a5-84fe-4c45-ac00-337055e94686"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:29:41 crc kubenswrapper[4900]: E0127 12:29:41.982079 4900 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" interval="1.6s"
Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.061704 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/465537a5-84fe-4c45-ac00-337055e94686-kube-api-access\") on node \"crc\" DevicePath \"\""
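The UnmountVolume started -> UnmountVolume.TearDown succeeded -> "Volume detached" progression above is the kubelet's volume manager reconciling away the mounts of the now-finished installer-9-crc pod: anything present in its actual state but no longer in its desired state gets torn down and then marked detached. A toy sketch of that reconcile shape, using plain string sets rather than the kubelet's real desired/actual state-of-world caches:

    package main

    import "fmt"

    // reconcile walks the actual volume state and removes anything no longer
    // desired, mirroring the unmount -> teardown -> detached sequence above.
    // The real reconciler also handles the opposite direction (mounting
    // newly desired volumes) and runs periodically.
    func reconcile(desired, actual map[string]bool) {
    	for vol := range actual {
    		if !desired[vol] {
    			fmt.Printf("UnmountVolume started for volume %q\n", vol)
    			// ... the volume plugin's TearDown would run here ...
    			delete(actual, vol)
    			fmt.Printf("Volume detached for volume %q\n", vol)
    		}
    	}
    }

    func main() {
    	desired := map[string]bool{} // pod is gone: nothing is wanted anymore
    	actual := map[string]bool{
    		"var-lock":        true,
    		"kubelet-dir":     true,
    		"kube-api-access": true,
    	}
    	reconcile(desired, actual)
    }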
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.533065 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"465537a5-84fe-4c45-ac00-337055e94686","Type":"ContainerDied","Data":"55f63688c2a95c666c8c1a615faff1d4b8654921d57ea2aa1dfc5095bbb8d4f0"} Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.533154 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55f63688c2a95c666c8c1a615faff1d4b8654921d57ea2aa1dfc5095bbb8d4f0" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.539140 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.540181 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.540861 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.541185 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.541389 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.542280 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.542787 4900 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc" exitCode=0 Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.542797 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.543879 4900 status_manager.go:851] 
"Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: E0127 12:29:42.543985 4900 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.220:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.544835 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.545326 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.545600 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.545808 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.546402 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.889281 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.890178 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.890916 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.891329 4900 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.891698 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.892304 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.892579 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.892918 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:42 crc kubenswrapper[4900]: I0127 12:29:42.893210 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.080831 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.080972 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.081040 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.081099 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.081102 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.081128 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.081487 4900 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.081503 4900 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.081515 4900 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.553297 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.555352 4900 scope.go:117] "RemoveContainer" containerID="355e30f76b375b00057638f14bf0b41adf853c25aba5fc64ed7879e242b936c3" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.555648 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.574739 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.575526 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.575761 4900 scope.go:117] "RemoveContainer" containerID="66e242207d13f54f5fdbdd354eb4d280a725449730563f22eb860f70ad1beebc" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.575928 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.576660 4900 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.577011 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.577391 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.577651 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:43 crc kubenswrapper[4900]: E0127 12:29:43.582855 4900 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" interval="3.2s" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.594371 4900 scope.go:117] "RemoveContainer" containerID="0e51dd80b8fc1d18cbe5f8a9d93ad94655705c2a7973f193a0eb82af09d2876d" Jan 27 12:29:43 crc 
kubenswrapper[4900]: I0127 12:29:43.614543 4900 scope.go:117] "RemoveContainer" containerID="5968048dea10c2e4fbeefcc4a66e2b8f74d5efb0afee4dff881ee9b2cc7ba06f" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.629991 4900 scope.go:117] "RemoveContainer" containerID="e7f4d5e92694f5c17712e4570580bf46a1cce773e6fc511877fc1c46fe7ab0fc" Jan 27 12:29:43 crc kubenswrapper[4900]: I0127 12:29:43.653464 4900 scope.go:117] "RemoveContainer" containerID="f59e3609dcc8cf39bc80c95a80b041aa9777f34bc498b028702e83601ee95e99" Jan 27 12:29:44 crc kubenswrapper[4900]: I0127 12:29:44.118384 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:44 crc kubenswrapper[4900]: I0127 12:29:44.118742 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:44 crc kubenswrapper[4900]: I0127 12:29:44.118409 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 27 12:29:44 crc kubenswrapper[4900]: I0127 12:29:44.118867 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 27 12:29:44 crc kubenswrapper[4900]: I0127 12:29:44.578264 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 27 12:29:46 crc kubenswrapper[4900]: I0127 12:29:46.487285 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:46 crc kubenswrapper[4900]: I0127 12:29:46.488785 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:46 crc kubenswrapper[4900]: I0127 12:29:46.489435 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:46 crc kubenswrapper[4900]: I0127 12:29:46.489803 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" 
pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:46 crc kubenswrapper[4900]: I0127 12:29:46.490358 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:46 crc kubenswrapper[4900]: I0127 12:29:46.491273 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:46 crc kubenswrapper[4900]: E0127 12:29:46.784320 4900 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" interval="6.4s" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.049730 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.049848 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.101205 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.102294 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.103219 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.104160 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.104497 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.104831 4900 
status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.105155 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.665777 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.666754 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.667664 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.668074 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.668796 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.669759 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:47 crc kubenswrapper[4900]: I0127 12:29:47.670625 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.480968 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.483728 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.484622 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.484931 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.485242 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.485567 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.485802 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.505665 4900 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.505722 4900 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:29:49 crc kubenswrapper[4900]: E0127 12:29:49.506356 4900 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:49 crc kubenswrapper[4900]: I0127 12:29:49.507181 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:50 crc kubenswrapper[4900]: E0127 12:29:50.605110 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:29:50Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:29:50Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:29:50Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-27T12:29:50Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:024b1ed0676c2e11f6a319392c82e7acd0ceeae31ca00b202307c4d86a796b20\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:ada03173793960eaa0e4263282fcbf5af3dea8aaf2c3b0d864906108db062e8a\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1672061160},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:200a8645cdce6d8fa5a43098c67b88945e73bd2cae92ca7b61297d44fdc66978\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:fb0551b0a119afe949321afb7bfcae7fd008137fa5b974a8a4c36e3d937e8bce\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1201577188},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:2a894e1c38c447ab3aeadd069481fe8598e475279349518868fbba58861a9d20\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:b3efc392f71c68e5db92c84992e1e340a0fc167dd2cd8e110a6c23037738d467\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1185882355},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:6d91aecdb391dd0cbb56f2b6335674bd2b4a25c63f0b9e893ba8977a71be3c0d\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:98739198606db13baf3fa39b12298669778a619dff80b9b5d51987d7f76056c9\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1180173538},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"na
mes\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0
d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\
"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:50 crc kubenswrapper[4900]: E0127 12:29:50.605918 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:50 crc kubenswrapper[4900]: E0127 12:29:50.606434 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:50 crc kubenswrapper[4900]: E0127 12:29:50.607277 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:50 crc kubenswrapper[4900]: E0127 12:29:50.607580 4900 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:50 crc kubenswrapper[4900]: E0127 12:29:50.607609 4900 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 27 12:29:50 crc kubenswrapper[4900]: E0127 12:29:50.987202 4900 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.220:6443: connect: connection refused" event="&Event{ObjectMeta:{community-operators-q5sf2.188e96514676226b openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:community-operators-q5sf2,UID:c7ff526b-ed0d-42d2-98bf-2f237d76cbf4,APIVersion:v1,ResourceVersion:28414,FieldPath:spec.containers{registry-server},},Reason:Pulling,Message:Pulling image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-27 12:29:39.421848171 +0000 UTC m=+206.658876381,LastTimestamp:2026-01-27 12:29:39.421848171 +0000 UTC m=+206.658876381,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 27 12:29:51 crc kubenswrapper[4900]: I0127 12:29:51.630573 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"64c41666a44fbded571608d797d2d7f681e295e517893d8fabc46bd25554fcaf"} Jan 27 12:29:52 crc kubenswrapper[4900]: I0127 12:29:52.372695 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Jan 27 12:29:52 crc kubenswrapper[4900]: I0127 12:29:52.372794 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:29:52 crc kubenswrapper[4900]: I0127 12:29:52.671488 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s4cn" event={"ID":"93a3d7a3-dddf-49e3-be5b-5369108a5e13","Type":"ContainerStarted","Data":"4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff"} Jan 27 12:29:52 crc kubenswrapper[4900]: I0127 12:29:52.730194 4900 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 27 12:29:52 crc kubenswrapper[4900]: I0127 12:29:52.730285 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: E0127 12:29:53.185424 4900 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.220:6443: connect: connection refused" interval="7s" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.684979 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.685081 4900 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a" exitCode=1 Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.685176 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a"} Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.685885 4900 scope.go:117] "RemoveContainer" containerID="cf3122af7bb47e2b162fe748732fdb01dc33bc852eb7af22477c4d37f279c04a" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.686933 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.687305 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.687467 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.687618 4900 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.687824 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.688012 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.688203 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.689347 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5sf2" event={"ID":"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4","Type":"ContainerStarted","Data":"6bdcf83296f0fd0cf6e4ff8a0d72376166c2e1abddb85359d63db8a5b40dba14"} Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.690314 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.690541 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.690805 4900 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.691296 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.691624 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.691824 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.692012 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.694701 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7a8d74d566de9f72ad9b085c2a9141a022977368a81346aa869a8b529a94f24e"} Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.695044 4900 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.695087 4900 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:29:53 crc kubenswrapper[4900]: E0127 12:29:53.695951 4900 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.696480 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.697331 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.697667 4900 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.697840 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.697991 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.698266 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.698422 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.699446 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jvcrt" event={"ID":"86867c21-fd52-44e7-85d2-87da48322397","Type":"ContainerStarted","Data":"a5338c72764cd468a9a4699ae43a19077c903f169068a51d6494b6f9cb03da8e"} Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.700357 4900 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.700562 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.700736 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.700914 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.701119 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.701306 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.701478 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.705110 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n9dph" event={"ID":"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57","Type":"ContainerStarted","Data":"71e2cfc2a809ab7b3df8b70baa8b21d0ebcaf2b6023d922dba2b5a4af66b0289"} Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.705244 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.705632 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.706002 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.706240 4900 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.706493 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.706713 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.706944 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.707639 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.708345 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.711168 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.711413 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.711577 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.711751 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:53 crc kubenswrapper[4900]: I0127 12:29:53.711916 4900 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.133559 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-zbp4l" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.134521 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.135039 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.135598 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.135868 4900 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.136109 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.136510 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.136738 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.136951 4900 status_manager.go:851] "Failed to get status for pod" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" pod="openshift-console/downloads-7954f5f757-zbp4l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-zbp4l\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.714025 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.714396 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f9685cb5ae5d2b3ab7074e3c3eee36a6229bd0b87b8158ce4f761c36509c5b6e"} Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.716512 4900 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="7a8d74d566de9f72ad9b085c2a9141a022977368a81346aa869a8b529a94f24e" exitCode=0 Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.716605 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"7a8d74d566de9f72ad9b085c2a9141a022977368a81346aa869a8b529a94f24e"} Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.716862 4900 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.716900 4900 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.717340 4900 status_manager.go:851] "Failed to get status for pod" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" pod="openshift-console/downloads-7954f5f757-zbp4l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-zbp4l\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: E0127 12:29:54.717357 4900 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.717778 4900 status_manager.go:851] "Failed to get status for pod" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" pod="openshift-marketplace/community-operators-q5sf2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-q5sf2\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.718129 4900 status_manager.go:851] "Failed to get status for pod" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" pod="openshift-marketplace/certified-operators-n9dph" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-n9dph\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.718439 4900 status_manager.go:851] "Failed to get status for pod" podUID="465537a5-84fe-4c45-ac00-337055e94686" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.718705 4900 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.718947 4900 status_manager.go:851] "Failed to get status for pod" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" pod="openshift-marketplace/redhat-operators-9s4cn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-9s4cn\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.719233 4900 status_manager.go:851] "Failed to get status for pod" podUID="754ddb47-7891-4142-b52c-98a29ca40078" pod="openshift-marketplace/certified-operators-grrcf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-grrcf\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:54 crc kubenswrapper[4900]: I0127 12:29:54.719467 4900 status_manager.go:851] "Failed to get status for pod" podUID="86867c21-fd52-44e7-85d2-87da48322397" pod="openshift-marketplace/community-operators-jvcrt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-jvcrt\": dial tcp 38.102.83.220:6443: connect: connection refused" Jan 27 12:29:55 crc kubenswrapper[4900]: I0127 12:29:55.725526 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7a7016ed91476e9c84bd638ad111464aa10e1dc083a86addd04ca64dce0f6bee"} Jan 27 12:29:56 crc kubenswrapper[4900]: I0127 12:29:56.741872 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3e5857aaa1a837d2a5c75684044bf63be507e6e56b6dc1bb33119e2d26d1c4fa"} Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.005428 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.005522 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.058191 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.134995 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.135845 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.171741 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.651322 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.651422 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.694342 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.798258 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.799673 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:29:57 crc kubenswrapper[4900]: I0127 12:29:57.805083 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:29:59 crc kubenswrapper[4900]: I0127 12:29:59.771023 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"14c675c1aa587a57e7132663d0c743584d10630145227e4b5e8d6a104ad31db5"} Jan 27 12:29:59 crc kubenswrapper[4900]: I0127 12:29:59.936480 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9s4cn" Jan 27 12:29:59 crc kubenswrapper[4900]: I0127 12:29:59.937018 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9s4cn" Jan 27 12:29:59 crc kubenswrapper[4900]: I0127 12:29:59.990144 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9s4cn" Jan 27 12:30:00 crc kubenswrapper[4900]: I0127 12:30:00.315617 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 12:30:00 crc kubenswrapper[4900]: I0127 12:30:00.320571 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 12:30:00 crc kubenswrapper[4900]: I0127 12:30:00.784580 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b225785a043f3a0e06dbff68b01ed1f97759682e9e0596243272f3e3640b17df"} Jan 27 12:30:00 crc kubenswrapper[4900]: I0127 12:30:00.784925 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 27 12:30:00 crc kubenswrapper[4900]: I0127 12:30:00.832950 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-9s4cn" Jan 27 12:30:02 crc kubenswrapper[4900]: I0127 12:30:02.819730 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"faf912e9b4c89e7d56aefb7f2cd71bbd15163c24d6235d613dcf8652f5aaa16a"} Jan 27 12:30:03 crc kubenswrapper[4900]: I0127 12:30:03.825599 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:30:03 crc kubenswrapper[4900]: I0127 12:30:03.825664 4900 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:30:03 crc kubenswrapper[4900]: I0127 12:30:03.826165 4900 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:30:03 crc kubenswrapper[4900]: I0127 12:30:03.841198 4900 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:30:03 crc kubenswrapper[4900]: I0127 12:30:03.850574 4900 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cab6a0dd-86cd-4a5f-9397-c7f3ea105220\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a7016ed91476e9c84bd638ad111464aa10e1dc083a86addd04ca64dce0f6bee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:29:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://14c675c1aa587a57e7132663d0c743584d10630145227e4b5e8d6a104ad31db5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e5857aaa1a837d2a5c75684044bf63be507e6e56b6dc1bb33119e2d26d1c4fa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf912e9b4c89e7d56aefb7f2cd71bbd15163c24d6235d613dcf8652f5aaa16a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:30:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b225785a043f3a0e06dbff68b01ed1f97759682e9e0596243272f3e3640b17df\\\",\\\"image\\
\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-27T12:30:00Z\\\"}}}],\\\"phase\\\":\\\"Running\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": pods \"kube-apiserver-crc\" not found" Jan 27 12:30:04 crc kubenswrapper[4900]: I0127 12:30:04.508294 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:30:04 crc kubenswrapper[4900]: I0127 12:30:04.508352 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:30:04 crc kubenswrapper[4900]: I0127 12:30:04.514613 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:30:04 crc kubenswrapper[4900]: I0127 12:30:04.518307 4900 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="fd821afa-9c6b-46f6-82c4-bebd9763ee30" Jan 27 12:30:04 crc kubenswrapper[4900]: I0127 12:30:04.831813 4900 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:30:04 crc kubenswrapper[4900]: I0127 12:30:04.831867 4900 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:30:05 crc kubenswrapper[4900]: I0127 12:30:05.838030 4900 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:30:05 crc kubenswrapper[4900]: I0127 12:30:05.838355 4900 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:30:05 crc kubenswrapper[4900]: I0127 12:30:05.843418 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 27 12:30:06 crc kubenswrapper[4900]: I0127 12:30:06.544792 4900 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="fd821afa-9c6b-46f6-82c4-bebd9763ee30" Jan 27 12:30:06 crc kubenswrapper[4900]: I0127 12:30:06.845029 4900 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:30:06 crc kubenswrapper[4900]: I0127 12:30:06.845171 4900 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220" Jan 27 12:30:06 crc kubenswrapper[4900]: I0127 12:30:06.849707 4900 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="fd821afa-9c6b-46f6-82c4-bebd9763ee30" Jan 27 12:30:11 crc 
kubenswrapper[4900]: I0127 12:30:11.770586 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.051528 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.308192 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.362858 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.734048 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.767481 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.768811 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.954706 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.980696 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.983655 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 27 12:30:12 crc kubenswrapper[4900]: I0127 12:30:12.986929 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.252482 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.410004 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.435093 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.512314 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.525254 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.551323 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.716366 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.785572 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
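
Each reflector.go:368 "Caches populated" line above and below is one client-go reflector finishing its initial LIST+WATCH against the recovered API server; the kubelet runs one such informer per Secret and ConfigMap it mounts, which is why they arrive as a burst once the connection is back. A minimal sketch of the same mechanism with client-go (the kubeconfig path is an assumption and this is not the kubelet's actual wiring, just the library pattern the log lines come from):

    // cachesync.go - start one shared informer and wait for its cache,
    // the step these "Caches populated" lines record. Requires the
    // k8s.io/client-go module; the kubeconfig path is an assumption.
    package main

    import (
        "fmt"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)
        factory := informers.NewSharedInformerFactory(client, 0)
        cm := factory.Core().V1().ConfigMaps().Informer()
        stop := make(chan struct{})
        defer close(stop)
        factory.Start(stop)            // reflector begins its LIST+WATCH
        factory.WaitForCacheSync(stop) // returns once the cache is populated
        fmt.Println("configmap cache synced:", cm.HasSynced())
    }
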
object-"openshift-ingress"/"openshift-service-ca.crt" Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.800472 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.827261 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.838309 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.878309 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 27 12:30:13 crc kubenswrapper[4900]: I0127 12:30:13.899548 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.080087 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.102659 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.109344 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.253681 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.273949 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.393147 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.421713 4900 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.796214 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 27 12:30:14 crc kubenswrapper[4900]: I0127 12:30:14.885089 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.062521 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.074683 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.118522 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.173548 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.388552 4900 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.420330 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.470675 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.477946 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.478618 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.479381 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.506198 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.774848 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.780037 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.782902 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.788953 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 27 12:30:15 crc kubenswrapper[4900]: I0127 12:30:15.860341 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.100080 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.165176 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.176427 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.181542 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.213170 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.362339 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.385712 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.390736 4900 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.405762 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.454740 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.533706 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.536257 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.604300 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.673006 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.770156 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.832757 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.888615 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.931626 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 27 12:30:16 crc kubenswrapper[4900]: I0127 12:30:16.972123 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.048312 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.067858 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.154492 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.175376 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.212761 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.237233 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.274199 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.411102 4900 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.460753 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.475722 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.492267 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.567706 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.568378 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.590449 4900 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.615524 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.640733 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.707234 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.760237 4900 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.826050 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 27 12:30:17 crc kubenswrapper[4900]: I0127 12:30:17.888706 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.005187 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.102830 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.123261 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.137886 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.162177 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.191071 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.265423 4900 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-dns"/"openshift-service-ca.crt" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.269976 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.294730 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.310848 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.362755 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.404666 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.666417 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.706018 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.710734 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.748868 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.856325 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 27 12:30:18 crc kubenswrapper[4900]: I0127 12:30:18.987896 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.032902 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.065380 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.091657 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.129963 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.243666 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.371845 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.572485 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 27 12:30:19 crc kubenswrapper[4900]: 
I0127 12:30:19.573419 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.614657 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 27 12:30:19 crc kubenswrapper[4900]: I0127 12:30:19.781329 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.083968 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.084416 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.084652 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.084746 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.085045 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.221934 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.227647 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.233045 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.257850 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.258559 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.264770 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.275050 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.286021 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.630817 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.632656 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.637794 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 27 12:30:20 
crc kubenswrapper[4900]: I0127 12:30:20.640212 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.655857 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.686853 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.783965 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.788630 4900 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 27 12:30:20 crc kubenswrapper[4900]: I0127 12:30:20.876472 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.033803 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.041989 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.073467 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.088728 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.129081 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.178480 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.211322 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.222634 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.274771 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.437755 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.450983 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.463732 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.476937 4900 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.480096 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.502852 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.563929 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.611246 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.679826 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.721518 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.730304 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.819223 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.845350 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.867189 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.892220 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 27 12:30:21 crc kubenswrapper[4900]: I0127 12:30:21.999299 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.009094 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.071497 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.103740 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.181309 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.277145 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.280261 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.327893 4900 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.336276 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.372735 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.372846 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.372913 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.373869 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.373999 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67" gracePeriod=600
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.423525 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.424153 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.444866 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.489923 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.508016 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.509116 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.605661 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.647996 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
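
Unlike the startup-probe misses earlier, the failed liveness probe above is terminal for the container: prober.go records the refused connection on 127.0.0.1:8798/health, kuberuntime kills machine-config-daemon with the pod's 600-second grace period, and the next sync starts a replacement (the ContainerDied/ContainerStarted pair that follows). The check itself is just an HTTP GET; a sketch with the URL taken from the log and an assumed 3-second timeout:

    // livecheck.go - the shape of the liveness check that failed above;
    // the URL is from the log, the timeout is an assumption.
    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Timeout: 3 * time.Second}
        resp, err := client.Get("http://127.0.0.1:8798/health")
        if err != nil {
            fmt.Println("liveness failure:", err) // matches the probe output in the log
            return
        }
        defer resp.Body.Close()
        fmt.Println("liveness OK:", resp.Status)
    }
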
from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.695529 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.758049 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.783237 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.860474 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.967836 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67" exitCode=0 Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.967916 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67"} Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.967962 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"ef8a105b9e3140aef604e358616ef8284c481fa15985cc985f8c4ca4dcabda76"} Jan 27 12:30:22 crc kubenswrapper[4900]: I0127 12:30:22.977457 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.014952 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.028688 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.147326 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.167356 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.180786 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.275578 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.365825 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.397654 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.423804 4900 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.506010 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.508769 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.573763 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.597147 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.624832 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.649789 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.708217 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.725518 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.856522 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 27 12:30:23 crc kubenswrapper[4900]: I0127 12:30:23.993042 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.066787 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.071423 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.278275 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.322728 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.385449 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.416072 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.488228 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.524828 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.533375 4900 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress-operator"/"metrics-tls" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.633593 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.671919 4900 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.672521 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q5sf2" podStartSLOduration=40.148280171 podStartE2EDuration="2m48.672492463s" podCreationTimestamp="2026-01-27 12:27:36 +0000 UTC" firstStartedPulling="2026-01-27 12:27:42.17847507 +0000 UTC m=+89.415503280" lastFinishedPulling="2026-01-27 12:29:50.702687362 +0000 UTC m=+217.939715572" observedRunningTime="2026-01-27 12:30:01.676133111 +0000 UTC m=+228.913161321" watchObservedRunningTime="2026-01-27 12:30:24.672492463 +0000 UTC m=+251.909520703" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.674632 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9s4cn" podStartSLOduration=37.141911274 podStartE2EDuration="2m45.67462219s" podCreationTimestamp="2026-01-27 12:27:39 +0000 UTC" firstStartedPulling="2026-01-27 12:27:42.16212376 +0000 UTC m=+89.399151970" lastFinishedPulling="2026-01-27 12:29:50.694834676 +0000 UTC m=+217.931862886" observedRunningTime="2026-01-27 12:30:01.541887335 +0000 UTC m=+228.778915545" watchObservedRunningTime="2026-01-27 12:30:24.67462219 +0000 UTC m=+251.911650400" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.674864 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jvcrt" podStartSLOduration=41.830941453 podStartE2EDuration="2m48.674858438s" podCreationTimestamp="2026-01-27 12:27:36 +0000 UTC" firstStartedPulling="2026-01-27 12:27:41.068223872 +0000 UTC m=+88.305252082" lastFinishedPulling="2026-01-27 12:29:47.912140857 +0000 UTC m=+215.149169067" observedRunningTime="2026-01-27 12:30:01.620673703 +0000 UTC m=+228.857701933" watchObservedRunningTime="2026-01-27 12:30:24.674858438 +0000 UTC m=+251.911886648" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.675541 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-grrcf" podStartSLOduration=56.591568841 podStartE2EDuration="2m48.675533739s" podCreationTimestamp="2026-01-27 12:27:36 +0000 UTC" firstStartedPulling="2026-01-27 12:27:41.074205244 +0000 UTC m=+88.311233454" lastFinishedPulling="2026-01-27 12:29:33.158170142 +0000 UTC m=+200.395198352" observedRunningTime="2026-01-27 12:30:01.569229661 +0000 UTC m=+228.806257871" watchObservedRunningTime="2026-01-27 12:30:24.675533739 +0000 UTC m=+251.912561949" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.675856 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.676436 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n9dph" podStartSLOduration=41.137619799 podStartE2EDuration="2m48.676431187s" podCreationTimestamp="2026-01-27 12:27:36 +0000 UTC" firstStartedPulling="2026-01-27 12:27:42.178459859 +0000 UTC m=+89.415488069" lastFinishedPulling="2026-01-27 
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.676877 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.677185 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.677637 4900 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220"
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.677684 4900 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="cab6a0dd-86cd-4a5f-9397-c7f3ea105220"
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.700647 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.701657 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=21.701633096 podStartE2EDuration="21.701633096s" podCreationTimestamp="2026-01-27 12:30:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:30:24.696104633 +0000 UTC m=+251.933132863" watchObservedRunningTime="2026-01-27 12:30:24.701633096 +0000 UTC m=+251.938661306"
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.815187 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.983923 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.991389 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 27 12:30:24 crc kubenswrapper[4900]: I0127 12:30:24.997196 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.222565 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.332474 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.387914 4900 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.416953 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.645654 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.687849 4900 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.688423 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8" gracePeriod=5
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.878930 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 27 12:30:25 crc kubenswrapper[4900]: I0127 12:30:25.969921 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.014892 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.031007 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.250753 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.302612 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.373631 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.563333 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.609477 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.877985 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 27 12:30:26 crc kubenswrapper[4900]: I0127 12:30:26.906651 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.002274 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.058989 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.105754 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.154534 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.190910 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.232018 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.378190 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.465349 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.871464 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.909594 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.945750 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv"]
Jan 27 12:30:27 crc kubenswrapper[4900]: E0127 12:30:27.946426 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.946461 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 27 12:30:27 crc kubenswrapper[4900]: E0127 12:30:27.946494 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="465537a5-84fe-4c45-ac00-337055e94686" containerName="installer"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.946504 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="465537a5-84fe-4c45-ac00-337055e94686" containerName="installer"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.946695 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="465537a5-84fe-4c45-ac00-337055e94686" containerName="installer"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.946728 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.947534 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.949680 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.950381 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.958370 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv"] Jan 27 12:30:27 crc kubenswrapper[4900]: I0127 12:30:27.976793 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.058681 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/524f0966-1edf-43b9-a459-415c57363842-config-volume\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.058775 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/524f0966-1edf-43b9-a459-415c57363842-secret-volume\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.058802 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvgjb\" (UniqueName: \"kubernetes.io/projected/524f0966-1edf-43b9-a459-415c57363842-kube-api-access-vvgjb\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.160687 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/524f0966-1edf-43b9-a459-415c57363842-config-volume\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.160758 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/524f0966-1edf-43b9-a459-415c57363842-secret-volume\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.160795 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvgjb\" (UniqueName: \"kubernetes.io/projected/524f0966-1edf-43b9-a459-415c57363842-kube-api-access-vvgjb\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.161832 
4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/524f0966-1edf-43b9-a459-415c57363842-config-volume\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.169784 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/524f0966-1edf-43b9-a459-415c57363842-secret-volume\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.192120 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvgjb\" (UniqueName: \"kubernetes.io/projected/524f0966-1edf-43b9-a459-415c57363842-kube-api-access-vvgjb\") pod \"collect-profiles-29491950-nnsrv\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.264314 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:28 crc kubenswrapper[4900]: I0127 12:30:28.707738 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv"] Jan 27 12:30:29 crc kubenswrapper[4900]: I0127 12:30:29.013833 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" event={"ID":"524f0966-1edf-43b9-a459-415c57363842","Type":"ContainerStarted","Data":"a2a2cc21e11008ab339d9c253948538245bf4d7cc6309cf970334ccde3b8143e"} Jan 27 12:30:29 crc kubenswrapper[4900]: I0127 12:30:29.013903 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" event={"ID":"524f0966-1edf-43b9-a459-415c57363842","Type":"ContainerStarted","Data":"7c423ae71a7c558adef3465a0930f0ec620b30ba532f4022b08d86bce08dc777"} Jan 27 12:30:30 crc kubenswrapper[4900]: I0127 12:30:30.020730 4900 generic.go:334] "Generic (PLEG): container finished" podID="524f0966-1edf-43b9-a459-415c57363842" containerID="a2a2cc21e11008ab339d9c253948538245bf4d7cc6309cf970334ccde3b8143e" exitCode=0 Jan 27 12:30:30 crc kubenswrapper[4900]: I0127 12:30:30.020977 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" event={"ID":"524f0966-1edf-43b9-a459-415c57363842","Type":"ContainerDied","Data":"a2a2cc21e11008ab339d9c253948538245bf4d7cc6309cf970334ccde3b8143e"} Jan 27 12:30:30 crc kubenswrapper[4900]: I0127 12:30:30.859332 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 27 12:30:30 crc kubenswrapper[4900]: I0127 12:30:30.859808 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.005851 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.005914 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.006013 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.006032 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.006158 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.006190 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.006361 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.006329 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.006455 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.007207 4900 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.007232 4900 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.007243 4900 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.007256 4900 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.021464 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.031443 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.031517 4900 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8" exitCode=137 Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.031653 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.031697 4900 scope.go:117] "RemoveContainer" containerID="0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.067465 4900 scope.go:117] "RemoveContainer" containerID="0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8" Jan 27 12:30:31 crc kubenswrapper[4900]: E0127 12:30:31.068868 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8\": container with ID starting with 0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8 not found: ID does not exist" containerID="0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.068951 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8"} err="failed to get container status \"0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8\": rpc error: code = NotFound desc = could not find container \"0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8\": container with ID starting with 0e5fb78b0b2de44f2fbc29b56c67b6c850b41c71a304b6812fddb5d073f381b8 not found: ID does not exist" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.108832 4900 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.265299 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.412977 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/524f0966-1edf-43b9-a459-415c57363842-config-volume\") pod \"524f0966-1edf-43b9-a459-415c57363842\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.413038 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvgjb\" (UniqueName: \"kubernetes.io/projected/524f0966-1edf-43b9-a459-415c57363842-kube-api-access-vvgjb\") pod \"524f0966-1edf-43b9-a459-415c57363842\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.413129 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/524f0966-1edf-43b9-a459-415c57363842-secret-volume\") pod \"524f0966-1edf-43b9-a459-415c57363842\" (UID: \"524f0966-1edf-43b9-a459-415c57363842\") " Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.414351 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/524f0966-1edf-43b9-a459-415c57363842-config-volume" (OuterVolumeSpecName: "config-volume") pod "524f0966-1edf-43b9-a459-415c57363842" (UID: "524f0966-1edf-43b9-a459-415c57363842"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.493376 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/524f0966-1edf-43b9-a459-415c57363842-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "524f0966-1edf-43b9-a459-415c57363842" (UID: "524f0966-1edf-43b9-a459-415c57363842"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.493476 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/524f0966-1edf-43b9-a459-415c57363842-kube-api-access-vvgjb" (OuterVolumeSpecName: "kube-api-access-vvgjb") pod "524f0966-1edf-43b9-a459-415c57363842" (UID: "524f0966-1edf-43b9-a459-415c57363842"). InnerVolumeSpecName "kube-api-access-vvgjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.514665 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/524f0966-1edf-43b9-a459-415c57363842-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.514701 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvgjb\" (UniqueName: \"kubernetes.io/projected/524f0966-1edf-43b9-a459-415c57363842-kube-api-access-vvgjb\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:31 crc kubenswrapper[4900]: I0127 12:30:31.514720 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/524f0966-1edf-43b9-a459-415c57363842-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:32 crc kubenswrapper[4900]: I0127 12:30:32.040597 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" event={"ID":"524f0966-1edf-43b9-a459-415c57363842","Type":"ContainerDied","Data":"7c423ae71a7c558adef3465a0930f0ec620b30ba532f4022b08d86bce08dc777"} Jan 27 12:30:32 crc kubenswrapper[4900]: I0127 12:30:32.040917 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c423ae71a7c558adef3465a0930f0ec620b30ba532f4022b08d86bce08dc777" Jan 27 12:30:32 crc kubenswrapper[4900]: I0127 12:30:32.040707 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv" Jan 27 12:30:32 crc kubenswrapper[4900]: I0127 12:30:32.489435 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 27 12:30:42 crc kubenswrapper[4900]: I0127 12:30:42.906840 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-n8cfh"] Jan 27 12:30:42 crc kubenswrapper[4900]: I0127 12:30:42.907671 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" podUID="f693c83e-d25c-4f46-bbb6-fd38195cde95" containerName="controller-manager" containerID="cri-o://8faa920a20e23b45c8b27f4a265ebd3ae522098b0d9dd02c2128f1841e648fc7" gracePeriod=30 Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.008722 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"] Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.009572 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" podUID="9080a9b0-9613-4077-bbbc-6ff558b4180c" containerName="route-controller-manager" containerID="cri-o://0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2" gracePeriod=30 Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.131727 4900 generic.go:334] "Generic (PLEG): container finished" podID="f693c83e-d25c-4f46-bbb6-fd38195cde95" containerID="8faa920a20e23b45c8b27f4a265ebd3ae522098b0d9dd02c2128f1841e648fc7" exitCode=0 Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.131804 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" event={"ID":"f693c83e-d25c-4f46-bbb6-fd38195cde95","Type":"ContainerDied","Data":"8faa920a20e23b45c8b27f4a265ebd3ae522098b0d9dd02c2128f1841e648fc7"} Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.454469 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.588245 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f693c83e-d25c-4f46-bbb6-fd38195cde95-serving-cert\") pod \"f693c83e-d25c-4f46-bbb6-fd38195cde95\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.588315 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-client-ca\") pod \"f693c83e-d25c-4f46-bbb6-fd38195cde95\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.588378 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-proxy-ca-bundles\") pod \"f693c83e-d25c-4f46-bbb6-fd38195cde95\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.588435 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fx6r9\" (UniqueName: \"kubernetes.io/projected/f693c83e-d25c-4f46-bbb6-fd38195cde95-kube-api-access-fx6r9\") pod \"f693c83e-d25c-4f46-bbb6-fd38195cde95\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.588473 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-config\") pod \"f693c83e-d25c-4f46-bbb6-fd38195cde95\" (UID: \"f693c83e-d25c-4f46-bbb6-fd38195cde95\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.589713 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f693c83e-d25c-4f46-bbb6-fd38195cde95" (UID: "f693c83e-d25c-4f46-bbb6-fd38195cde95"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.589837 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-config" (OuterVolumeSpecName: "config") pod "f693c83e-d25c-4f46-bbb6-fd38195cde95" (UID: "f693c83e-d25c-4f46-bbb6-fd38195cde95"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.590147 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-client-ca" (OuterVolumeSpecName: "client-ca") pod "f693c83e-d25c-4f46-bbb6-fd38195cde95" (UID: "f693c83e-d25c-4f46-bbb6-fd38195cde95"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.603703 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.689442 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-config\") pod \"9080a9b0-9613-4077-bbbc-6ff558b4180c\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.689553 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlwj8\" (UniqueName: \"kubernetes.io/projected/9080a9b0-9613-4077-bbbc-6ff558b4180c-kube-api-access-rlwj8\") pod \"9080a9b0-9613-4077-bbbc-6ff558b4180c\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.689602 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9080a9b0-9613-4077-bbbc-6ff558b4180c-serving-cert\") pod \"9080a9b0-9613-4077-bbbc-6ff558b4180c\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.689626 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-client-ca\") pod \"9080a9b0-9613-4077-bbbc-6ff558b4180c\" (UID: \"9080a9b0-9613-4077-bbbc-6ff558b4180c\") " Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.689878 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.689897 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.689910 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f693c83e-d25c-4f46-bbb6-fd38195cde95-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.691305 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-client-ca" (OuterVolumeSpecName: "client-ca") pod "9080a9b0-9613-4077-bbbc-6ff558b4180c" (UID: "9080a9b0-9613-4077-bbbc-6ff558b4180c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.691386 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-config" (OuterVolumeSpecName: "config") pod "9080a9b0-9613-4077-bbbc-6ff558b4180c" (UID: "9080a9b0-9613-4077-bbbc-6ff558b4180c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.692553 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f693c83e-d25c-4f46-bbb6-fd38195cde95-kube-api-access-fx6r9" (OuterVolumeSpecName: "kube-api-access-fx6r9") pod "f693c83e-d25c-4f46-bbb6-fd38195cde95" (UID: "f693c83e-d25c-4f46-bbb6-fd38195cde95"). InnerVolumeSpecName "kube-api-access-fx6r9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.692605 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f693c83e-d25c-4f46-bbb6-fd38195cde95-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f693c83e-d25c-4f46-bbb6-fd38195cde95" (UID: "f693c83e-d25c-4f46-bbb6-fd38195cde95"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.693634 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9080a9b0-9613-4077-bbbc-6ff558b4180c-kube-api-access-rlwj8" (OuterVolumeSpecName: "kube-api-access-rlwj8") pod "9080a9b0-9613-4077-bbbc-6ff558b4180c" (UID: "9080a9b0-9613-4077-bbbc-6ff558b4180c"). InnerVolumeSpecName "kube-api-access-rlwj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.694355 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9080a9b0-9613-4077-bbbc-6ff558b4180c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9080a9b0-9613-4077-bbbc-6ff558b4180c" (UID: "9080a9b0-9613-4077-bbbc-6ff558b4180c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.791112 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9080a9b0-9613-4077-bbbc-6ff558b4180c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.791169 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.791184 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f693c83e-d25c-4f46-bbb6-fd38195cde95-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.791196 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9080a9b0-9613-4077-bbbc-6ff558b4180c-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.791210 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlwj8\" (UniqueName: \"kubernetes.io/projected/9080a9b0-9613-4077-bbbc-6ff558b4180c-kube-api-access-rlwj8\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:43 crc kubenswrapper[4900]: I0127 12:30:43.791223 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fx6r9\" (UniqueName: \"kubernetes.io/projected/f693c83e-d25c-4f46-bbb6-fd38195cde95-kube-api-access-fx6r9\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.069092 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc"] Jan 27 12:30:44 crc kubenswrapper[4900]: E0127 12:30:44.069380 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f693c83e-d25c-4f46-bbb6-fd38195cde95" containerName="controller-manager" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.069396 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f693c83e-d25c-4f46-bbb6-fd38195cde95" 
containerName="controller-manager" Jan 27 12:30:44 crc kubenswrapper[4900]: E0127 12:30:44.069410 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="524f0966-1edf-43b9-a459-415c57363842" containerName="collect-profiles" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.069419 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="524f0966-1edf-43b9-a459-415c57363842" containerName="collect-profiles" Jan 27 12:30:44 crc kubenswrapper[4900]: E0127 12:30:44.069429 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9080a9b0-9613-4077-bbbc-6ff558b4180c" containerName="route-controller-manager" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.069435 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="9080a9b0-9613-4077-bbbc-6ff558b4180c" containerName="route-controller-manager" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.069582 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="524f0966-1edf-43b9-a459-415c57363842" containerName="collect-profiles" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.069596 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f693c83e-d25c-4f46-bbb6-fd38195cde95" containerName="controller-manager" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.069607 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="9080a9b0-9613-4077-bbbc-6ff558b4180c" containerName="route-controller-manager" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.070025 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.074651 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr"] Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.075289 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.087503 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr"] Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.094005 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc"] Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.141911 4900 generic.go:334] "Generic (PLEG): container finished" podID="9080a9b0-9613-4077-bbbc-6ff558b4180c" containerID="0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2" exitCode=0 Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.142002 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.142042 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" event={"ID":"9080a9b0-9613-4077-bbbc-6ff558b4180c","Type":"ContainerDied","Data":"0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2"} Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.142622 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm" event={"ID":"9080a9b0-9613-4077-bbbc-6ff558b4180c","Type":"ContainerDied","Data":"ef5d4c2efcf8578aa8cfb3785df5a32fc9349c2cc687334be69d96a5932ff136"} Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.142674 4900 scope.go:117] "RemoveContainer" containerID="0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.151942 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" event={"ID":"f693c83e-d25c-4f46-bbb6-fd38195cde95","Type":"ContainerDied","Data":"59013904bd1cec7f4cfdb89246afe7e3b64cc8c8e8bf66e716828c626139650e"} Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.152087 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-n8cfh" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.159696 4900 scope.go:117] "RemoveContainer" containerID="0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2" Jan 27 12:30:44 crc kubenswrapper[4900]: E0127 12:30:44.160305 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2\": container with ID starting with 0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2 not found: ID does not exist" containerID="0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.160367 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2"} err="failed to get container status \"0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2\": rpc error: code = NotFound desc = could not find container \"0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2\": container with ID starting with 0d9f813b00e6e233cd0f9be25e1300d7b0ee80f394dfd0ffb090005f3abf52d2 not found: ID does not exist" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.160435 4900 scope.go:117] "RemoveContainer" containerID="8faa920a20e23b45c8b27f4a265ebd3ae522098b0d9dd02c2128f1841e648fc7" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.187592 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"] Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.191419 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-52vsm"] Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195725 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-client-ca\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195770 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-proxy-ca-bundles\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195798 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-client-ca\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195829 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4110c131-ff73-4467-9692-ee5011e6549e-serving-cert\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195857 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-config\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195903 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-config\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195924 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cx5c\" (UniqueName: \"kubernetes.io/projected/4110c131-ff73-4467-9692-ee5011e6549e-kube-api-access-2cx5c\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195949 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzt8x\" (UniqueName: \"kubernetes.io/projected/fa603bc4-eae2-4337-aea1-d38bf390b099-kube-api-access-lzt8x\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.195986 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/fa603bc4-eae2-4337-aea1-d38bf390b099-serving-cert\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.202755 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-n8cfh"] Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.208646 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-n8cfh"] Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298034 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-config\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298151 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-config\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298192 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cx5c\" (UniqueName: \"kubernetes.io/projected/4110c131-ff73-4467-9692-ee5011e6549e-kube-api-access-2cx5c\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298224 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzt8x\" (UniqueName: \"kubernetes.io/projected/fa603bc4-eae2-4337-aea1-d38bf390b099-kube-api-access-lzt8x\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298252 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa603bc4-eae2-4337-aea1-d38bf390b099-serving-cert\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298288 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-client-ca\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298319 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-proxy-ca-bundles\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: 
\"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298351 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-client-ca\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.298392 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4110c131-ff73-4467-9692-ee5011e6549e-serving-cert\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.299525 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-config\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.299529 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-client-ca\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.299826 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-config\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.299984 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-client-ca\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.301441 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-proxy-ca-bundles\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.303463 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4110c131-ff73-4467-9692-ee5011e6549e-serving-cert\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.303950 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa603bc4-eae2-4337-aea1-d38bf390b099-serving-cert\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.321386 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cx5c\" (UniqueName: \"kubernetes.io/projected/4110c131-ff73-4467-9692-ee5011e6549e-kube-api-access-2cx5c\") pod \"controller-manager-7f6b76f4d8-jh7xr\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.322251 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzt8x\" (UniqueName: \"kubernetes.io/projected/fa603bc4-eae2-4337-aea1-d38bf390b099-kube-api-access-lzt8x\") pod \"route-controller-manager-5f78d9c568-82nrc\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.389404 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.398908 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.494381 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9080a9b0-9613-4077-bbbc-6ff558b4180c" path="/var/lib/kubelet/pods/9080a9b0-9613-4077-bbbc-6ff558b4180c/volumes" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.494925 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f693c83e-d25c-4f46-bbb6-fd38195cde95" path="/var/lib/kubelet/pods/f693c83e-d25c-4f46-bbb6-fd38195cde95/volumes" Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.602663 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc"] Jan 27 12:30:44 crc kubenswrapper[4900]: I0127 12:30:44.647078 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr"] Jan 27 12:30:44 crc kubenswrapper[4900]: W0127 12:30:44.649019 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4110c131_ff73_4467_9692_ee5011e6549e.slice/crio-caa9429d2eaca6ba42d2be544353fe3df187f14ff537e85dd85b430353340754 WatchSource:0}: Error finding container caa9429d2eaca6ba42d2be544353fe3df187f14ff537e85dd85b430353340754: Status 404 returned error can't find the container with id caa9429d2eaca6ba42d2be544353fe3df187f14ff537e85dd85b430353340754 Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.161086 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" event={"ID":"4110c131-ff73-4467-9692-ee5011e6549e","Type":"ContainerStarted","Data":"d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f"} Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.161726 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.161743 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" event={"ID":"4110c131-ff73-4467-9692-ee5011e6549e","Type":"ContainerStarted","Data":"caa9429d2eaca6ba42d2be544353fe3df187f14ff537e85dd85b430353340754"} Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.167368 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" event={"ID":"fa603bc4-eae2-4337-aea1-d38bf390b099","Type":"ContainerStarted","Data":"b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c"} Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.167401 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" event={"ID":"fa603bc4-eae2-4337-aea1-d38bf390b099","Type":"ContainerStarted","Data":"6bf00ca59bc1ea2a6ad5b23fb890aee740fa6a9c7610d2266ae4aa9c35fb91ec"} Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.167575 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.172716 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.189159 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" podStartSLOduration=3.189112204 podStartE2EDuration="3.189112204s" podCreationTimestamp="2026-01-27 12:30:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:30:45.186530878 +0000 UTC m=+272.423559088" watchObservedRunningTime="2026-01-27 12:30:45.189112204 +0000 UTC m=+272.426140434" Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.220524 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" podStartSLOduration=2.220491165 podStartE2EDuration="2.220491165s" podCreationTimestamp="2026-01-27 12:30:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:30:45.214768415 +0000 UTC m=+272.451796645" watchObservedRunningTime="2026-01-27 12:30:45.220491165 +0000 UTC m=+272.457519385" Jan 27 12:30:45 crc kubenswrapper[4900]: I0127 12:30:45.240720 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:46 crc kubenswrapper[4900]: I0127 12:30:46.887695 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 27 12:30:50 crc kubenswrapper[4900]: I0127 12:30:50.499879 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr"] Jan 27 12:30:50 crc kubenswrapper[4900]: I0127 12:30:50.500857 4900 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" podUID="4110c131-ff73-4467-9692-ee5011e6549e" containerName="controller-manager" containerID="cri-o://d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f" gracePeriod=30 Jan 27 12:30:50 crc kubenswrapper[4900]: I0127 12:30:50.527269 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc"] Jan 27 12:30:50 crc kubenswrapper[4900]: I0127 12:30:50.527643 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" podUID="fa603bc4-eae2-4337-aea1-d38bf390b099" containerName="route-controller-manager" containerID="cri-o://b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c" gracePeriod=30 Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.078621 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.086744 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.213638 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cx5c\" (UniqueName: \"kubernetes.io/projected/4110c131-ff73-4467-9692-ee5011e6549e-kube-api-access-2cx5c\") pod \"4110c131-ff73-4467-9692-ee5011e6549e\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.213864 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-config\") pod \"fa603bc4-eae2-4337-aea1-d38bf390b099\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.213927 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-config\") pod \"4110c131-ff73-4467-9692-ee5011e6549e\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.214090 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa603bc4-eae2-4337-aea1-d38bf390b099-serving-cert\") pod \"fa603bc4-eae2-4337-aea1-d38bf390b099\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.214130 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-proxy-ca-bundles\") pod \"4110c131-ff73-4467-9692-ee5011e6549e\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.214184 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-client-ca\") pod \"fa603bc4-eae2-4337-aea1-d38bf390b099\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.214214 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/4110c131-ff73-4467-9692-ee5011e6549e-serving-cert\") pod \"4110c131-ff73-4467-9692-ee5011e6549e\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.214253 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzt8x\" (UniqueName: \"kubernetes.io/projected/fa603bc4-eae2-4337-aea1-d38bf390b099-kube-api-access-lzt8x\") pod \"fa603bc4-eae2-4337-aea1-d38bf390b099\" (UID: \"fa603bc4-eae2-4337-aea1-d38bf390b099\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.214306 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-client-ca\") pod \"4110c131-ff73-4467-9692-ee5011e6549e\" (UID: \"4110c131-ff73-4467-9692-ee5011e6549e\") " Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.215411 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-client-ca" (OuterVolumeSpecName: "client-ca") pod "fa603bc4-eae2-4337-aea1-d38bf390b099" (UID: "fa603bc4-eae2-4337-aea1-d38bf390b099"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.215449 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-config" (OuterVolumeSpecName: "config") pod "fa603bc4-eae2-4337-aea1-d38bf390b099" (UID: "fa603bc4-eae2-4337-aea1-d38bf390b099"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.215499 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-client-ca" (OuterVolumeSpecName: "client-ca") pod "4110c131-ff73-4467-9692-ee5011e6549e" (UID: "4110c131-ff73-4467-9692-ee5011e6549e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.215509 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4110c131-ff73-4467-9692-ee5011e6549e" (UID: "4110c131-ff73-4467-9692-ee5011e6549e"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.216181 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-config" (OuterVolumeSpecName: "config") pod "4110c131-ff73-4467-9692-ee5011e6549e" (UID: "4110c131-ff73-4467-9692-ee5011e6549e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.218669 4900 generic.go:334] "Generic (PLEG): container finished" podID="fa603bc4-eae2-4337-aea1-d38bf390b099" containerID="b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c" exitCode=0 Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.218729 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.218786 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" event={"ID":"fa603bc4-eae2-4337-aea1-d38bf390b099","Type":"ContainerDied","Data":"b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c"} Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.218831 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc" event={"ID":"fa603bc4-eae2-4337-aea1-d38bf390b099","Type":"ContainerDied","Data":"6bf00ca59bc1ea2a6ad5b23fb890aee740fa6a9c7610d2266ae4aa9c35fb91ec"} Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.218857 4900 scope.go:117] "RemoveContainer" containerID="b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.220702 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa603bc4-eae2-4337-aea1-d38bf390b099-kube-api-access-lzt8x" (OuterVolumeSpecName: "kube-api-access-lzt8x") pod "fa603bc4-eae2-4337-aea1-d38bf390b099" (UID: "fa603bc4-eae2-4337-aea1-d38bf390b099"). InnerVolumeSpecName "kube-api-access-lzt8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.220769 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4110c131-ff73-4467-9692-ee5011e6549e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4110c131-ff73-4467-9692-ee5011e6549e" (UID: "4110c131-ff73-4467-9692-ee5011e6549e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.221018 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4110c131-ff73-4467-9692-ee5011e6549e-kube-api-access-2cx5c" (OuterVolumeSpecName: "kube-api-access-2cx5c") pod "4110c131-ff73-4467-9692-ee5011e6549e" (UID: "4110c131-ff73-4467-9692-ee5011e6549e"). InnerVolumeSpecName "kube-api-access-2cx5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.221296 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa603bc4-eae2-4337-aea1-d38bf390b099-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fa603bc4-eae2-4337-aea1-d38bf390b099" (UID: "fa603bc4-eae2-4337-aea1-d38bf390b099"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.226380 4900 generic.go:334] "Generic (PLEG): container finished" podID="4110c131-ff73-4467-9692-ee5011e6549e" containerID="d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f" exitCode=0 Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.226442 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" event={"ID":"4110c131-ff73-4467-9692-ee5011e6549e","Type":"ContainerDied","Data":"d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f"} Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.226458 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.226485 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr" event={"ID":"4110c131-ff73-4467-9692-ee5011e6549e","Type":"ContainerDied","Data":"caa9429d2eaca6ba42d2be544353fe3df187f14ff537e85dd85b430353340754"} Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.243919 4900 scope.go:117] "RemoveContainer" containerID="b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c" Jan 27 12:30:51 crc kubenswrapper[4900]: E0127 12:30:51.244386 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c\": container with ID starting with b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c not found: ID does not exist" containerID="b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.244431 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c"} err="failed to get container status \"b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c\": rpc error: code = NotFound desc = could not find container \"b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c\": container with ID starting with b17b1b057d36775b28969f86ce8c7ec471fbca6a26fc629aaaa986d40efc8e6c not found: ID does not exist" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.244458 4900 scope.go:117] "RemoveContainer" containerID="d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.261452 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr"] Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.265031 4900 scope.go:117] "RemoveContainer" containerID="d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f" Jan 27 12:30:51 crc kubenswrapper[4900]: E0127 12:30:51.265693 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f\": container with ID starting with d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f not found: ID does not exist" containerID="d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.265741 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f"} err="failed to get container status \"d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f\": rpc error: code = NotFound desc = could not find container \"d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f\": container with ID starting with d7a80ec53819d7dd89d0821e42dad840c815f8fc95d226de8d584f65a2e7c02f not found: ID does not exist" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.267497 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7f6b76f4d8-jh7xr"] Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.315864 4900 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.316228 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.316315 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fa603bc4-eae2-4337-aea1-d38bf390b099-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.316424 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.316515 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fa603bc4-eae2-4337-aea1-d38bf390b099-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.316606 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4110c131-ff73-4467-9692-ee5011e6549e-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.316697 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzt8x\" (UniqueName: \"kubernetes.io/projected/fa603bc4-eae2-4337-aea1-d38bf390b099-kube-api-access-lzt8x\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.316776 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4110c131-ff73-4467-9692-ee5011e6549e-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.316837 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cx5c\" (UniqueName: \"kubernetes.io/projected/4110c131-ff73-4467-9692-ee5011e6549e-kube-api-access-2cx5c\") on node \"crc\" DevicePath \"\"" Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.547165 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc"] Jan 27 12:30:51 crc kubenswrapper[4900]: I0127 12:30:51.553077 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f78d9c568-82nrc"] Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.083983 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58bf4dd977-zg5lj"] Jan 27 12:30:52 crc kubenswrapper[4900]: E0127 12:30:52.084553 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa603bc4-eae2-4337-aea1-d38bf390b099" containerName="route-controller-manager" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.084580 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa603bc4-eae2-4337-aea1-d38bf390b099" containerName="route-controller-manager" Jan 27 12:30:52 crc kubenswrapper[4900]: E0127 12:30:52.084595 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4110c131-ff73-4467-9692-ee5011e6549e" containerName="controller-manager" Jan 27 12:30:52 crc 
kubenswrapper[4900]: I0127 12:30:52.084602 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4110c131-ff73-4467-9692-ee5011e6549e" containerName="controller-manager" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.084745 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa603bc4-eae2-4337-aea1-d38bf390b099" containerName="route-controller-manager" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.084762 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="4110c131-ff73-4467-9692-ee5011e6549e" containerName="controller-manager" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.085470 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.089134 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.089460 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.089711 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.089774 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.091537 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.092730 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp"] Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.093278 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.093794 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.098131 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.098370 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.098742 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.098827 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.098877 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.099270 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.103768 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.105907 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58bf4dd977-zg5lj"] Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.113240 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp"] Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.228786 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-config\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.228885 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d394107b-ac5e-44aa-81a3-bf7977cc0471-serving-cert\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.228919 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-proxy-ca-bundles\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.228958 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-config\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " 
pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.229114 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-client-ca\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.229229 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-client-ca\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.229300 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsvqk\" (UniqueName: \"kubernetes.io/projected/d394107b-ac5e-44aa-81a3-bf7977cc0471-kube-api-access-bsvqk\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.229417 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29e64c5e-0e69-4c9e-b3fc-948f68647981-serving-cert\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.229500 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfvdf\" (UniqueName: \"kubernetes.io/projected/29e64c5e-0e69-4c9e-b3fc-948f68647981-kube-api-access-lfvdf\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331112 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-config\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331195 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d394107b-ac5e-44aa-81a3-bf7977cc0471-serving-cert\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331224 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-proxy-ca-bundles\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " 
pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331272 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-config\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331296 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-client-ca\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331324 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-client-ca\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331356 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsvqk\" (UniqueName: \"kubernetes.io/projected/d394107b-ac5e-44aa-81a3-bf7977cc0471-kube-api-access-bsvqk\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331384 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29e64c5e-0e69-4c9e-b3fc-948f68647981-serving-cert\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.331413 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfvdf\" (UniqueName: \"kubernetes.io/projected/29e64c5e-0e69-4c9e-b3fc-948f68647981-kube-api-access-lfvdf\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.332955 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-proxy-ca-bundles\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.333272 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-client-ca\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.333309 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-config\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.333567 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-config\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.333683 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-client-ca\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.337190 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29e64c5e-0e69-4c9e-b3fc-948f68647981-serving-cert\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.337191 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d394107b-ac5e-44aa-81a3-bf7977cc0471-serving-cert\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.354668 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfvdf\" (UniqueName: \"kubernetes.io/projected/29e64c5e-0e69-4c9e-b3fc-948f68647981-kube-api-access-lfvdf\") pod \"controller-manager-58bf4dd977-zg5lj\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.356177 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsvqk\" (UniqueName: \"kubernetes.io/projected/d394107b-ac5e-44aa-81a3-bf7977cc0471-kube-api-access-bsvqk\") pod \"route-controller-manager-69d97d594-qg6mp\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.412790 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.432524 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.492641 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4110c131-ff73-4467-9692-ee5011e6549e" path="/var/lib/kubelet/pods/4110c131-ff73-4467-9692-ee5011e6549e/volumes" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.493347 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa603bc4-eae2-4337-aea1-d38bf390b099" path="/var/lib/kubelet/pods/fa603bc4-eae2-4337-aea1-d38bf390b099/volumes" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.635359 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.668758 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58bf4dd977-zg5lj"] Jan 27 12:30:52 crc kubenswrapper[4900]: I0127 12:30:52.937522 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp"] Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.246000 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" event={"ID":"d394107b-ac5e-44aa-81a3-bf7977cc0471","Type":"ContainerStarted","Data":"86022ad51511f18924bc4519ebb64e5ccfe862edddf8cebe75173a8497eebbd1"} Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.246122 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" event={"ID":"d394107b-ac5e-44aa-81a3-bf7977cc0471","Type":"ContainerStarted","Data":"4d24cfa8647fcd8579daa5c5de7cf0d6e44244b847bf73b0ae8d2a57bd968524"} Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.246154 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.248810 4900 patch_prober.go:28] interesting pod/route-controller-manager-69d97d594-qg6mp container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.249005 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" podUID="d394107b-ac5e-44aa-81a3-bf7977cc0471" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.253843 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" event={"ID":"29e64c5e-0e69-4c9e-b3fc-948f68647981","Type":"ContainerStarted","Data":"1dd66f8f06ef22e65adaa1c8c58be889c47e4144db1359db3ca36e0f5f5c76c9"} Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.253940 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" 
event={"ID":"29e64c5e-0e69-4c9e-b3fc-948f68647981","Type":"ContainerStarted","Data":"2ccc29a19ed6cda3a7c594ce7c50c8b74ae72e0007a5629aaa9c0365ae41ab1f"} Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.258245 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.291032 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.299610 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" podStartSLOduration=3.299578946 podStartE2EDuration="3.299578946s" podCreationTimestamp="2026-01-27 12:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:30:53.293543877 +0000 UTC m=+280.530572087" watchObservedRunningTime="2026-01-27 12:30:53.299578946 +0000 UTC m=+280.536607156" Jan 27 12:30:53 crc kubenswrapper[4900]: I0127 12:30:53.321590 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" podStartSLOduration=3.321560578 podStartE2EDuration="3.321560578s" podCreationTimestamp="2026-01-27 12:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:30:53.31824693 +0000 UTC m=+280.555275160" watchObservedRunningTime="2026-01-27 12:30:53.321560578 +0000 UTC m=+280.558588788" Jan 27 12:30:54 crc kubenswrapper[4900]: I0127 12:30:54.270637 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:30:55 crc kubenswrapper[4900]: I0127 12:30:55.604956 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 27 12:30:55 crc kubenswrapper[4900]: I0127 12:30:55.770544 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 27 12:31:02 crc kubenswrapper[4900]: I0127 12:31:02.654395 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 27 12:31:02 crc kubenswrapper[4900]: I0127 12:31:02.889125 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58bf4dd977-zg5lj"] Jan 27 12:31:02 crc kubenswrapper[4900]: I0127 12:31:02.889413 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" podUID="29e64c5e-0e69-4c9e-b3fc-948f68647981" containerName="controller-manager" containerID="cri-o://1dd66f8f06ef22e65adaa1c8c58be889c47e4144db1359db3ca36e0f5f5c76c9" gracePeriod=30 Jan 27 12:31:03 crc kubenswrapper[4900]: I0127 12:31:03.321089 4900 generic.go:334] "Generic (PLEG): container finished" podID="29e64c5e-0e69-4c9e-b3fc-948f68647981" containerID="1dd66f8f06ef22e65adaa1c8c58be889c47e4144db1359db3ca36e0f5f5c76c9" exitCode=0 Jan 27 12:31:03 crc kubenswrapper[4900]: I0127 12:31:03.321555 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" event={"ID":"29e64c5e-0e69-4c9e-b3fc-948f68647981","Type":"ContainerDied","Data":"1dd66f8f06ef22e65adaa1c8c58be889c47e4144db1359db3ca36e0f5f5c76c9"} Jan 27 12:31:03 crc kubenswrapper[4900]: I0127 12:31:03.669303 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 27 12:31:03 crc kubenswrapper[4900]: I0127 12:31:03.941530 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.280225 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.321284 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-b487f875d-mh5q7"] Jan 27 12:31:04 crc kubenswrapper[4900]: E0127 12:31:04.321642 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29e64c5e-0e69-4c9e-b3fc-948f68647981" containerName="controller-manager" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.321662 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="29e64c5e-0e69-4c9e-b3fc-948f68647981" containerName="controller-manager" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.321818 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="29e64c5e-0e69-4c9e-b3fc-948f68647981" containerName="controller-manager" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.322452 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.331167 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" event={"ID":"29e64c5e-0e69-4c9e-b3fc-948f68647981","Type":"ContainerDied","Data":"2ccc29a19ed6cda3a7c594ce7c50c8b74ae72e0007a5629aaa9c0365ae41ab1f"} Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.331232 4900 scope.go:117] "RemoveContainer" containerID="1dd66f8f06ef22e65adaa1c8c58be889c47e4144db1359db3ca36e0f5f5c76c9" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.331370 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58bf4dd977-zg5lj" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.355678 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b487f875d-mh5q7"] Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.411045 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29e64c5e-0e69-4c9e-b3fc-948f68647981-serving-cert\") pod \"29e64c5e-0e69-4c9e-b3fc-948f68647981\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.411188 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfvdf\" (UniqueName: \"kubernetes.io/projected/29e64c5e-0e69-4c9e-b3fc-948f68647981-kube-api-access-lfvdf\") pod \"29e64c5e-0e69-4c9e-b3fc-948f68647981\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.411236 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-client-ca\") pod \"29e64c5e-0e69-4c9e-b3fc-948f68647981\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.411281 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-proxy-ca-bundles\") pod \"29e64c5e-0e69-4c9e-b3fc-948f68647981\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.411342 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-config\") pod \"29e64c5e-0e69-4c9e-b3fc-948f68647981\" (UID: \"29e64c5e-0e69-4c9e-b3fc-948f68647981\") " Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.412732 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-client-ca" (OuterVolumeSpecName: "client-ca") pod "29e64c5e-0e69-4c9e-b3fc-948f68647981" (UID: "29e64c5e-0e69-4c9e-b3fc-948f68647981"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.412820 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-config" (OuterVolumeSpecName: "config") pod "29e64c5e-0e69-4c9e-b3fc-948f68647981" (UID: "29e64c5e-0e69-4c9e-b3fc-948f68647981"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.413155 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "29e64c5e-0e69-4c9e-b3fc-948f68647981" (UID: "29e64c5e-0e69-4c9e-b3fc-948f68647981"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.421327 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29e64c5e-0e69-4c9e-b3fc-948f68647981-kube-api-access-lfvdf" (OuterVolumeSpecName: "kube-api-access-lfvdf") pod "29e64c5e-0e69-4c9e-b3fc-948f68647981" (UID: "29e64c5e-0e69-4c9e-b3fc-948f68647981"). InnerVolumeSpecName "kube-api-access-lfvdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.421940 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29e64c5e-0e69-4c9e-b3fc-948f68647981-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "29e64c5e-0e69-4c9e-b3fc-948f68647981" (UID: "29e64c5e-0e69-4c9e-b3fc-948f68647981"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.512994 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-serving-cert\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513251 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7zhc\" (UniqueName: \"kubernetes.io/projected/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-kube-api-access-g7zhc\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513352 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-config\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513396 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-proxy-ca-bundles\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513617 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-client-ca\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513818 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29e64c5e-0e69-4c9e-b3fc-948f68647981-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513841 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfvdf\" (UniqueName: 
\"kubernetes.io/projected/29e64c5e-0e69-4c9e-b3fc-948f68647981-kube-api-access-lfvdf\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513856 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513866 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.513881 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29e64c5e-0e69-4c9e-b3fc-948f68647981-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.615770 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7zhc\" (UniqueName: \"kubernetes.io/projected/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-kube-api-access-g7zhc\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.615835 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-config\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.615866 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-proxy-ca-bundles\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.615965 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-client-ca\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.616093 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-serving-cert\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.617449 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-client-ca\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.617471 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-proxy-ca-bundles\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.617740 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-config\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.622229 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-serving-cert\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.642151 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7zhc\" (UniqueName: \"kubernetes.io/projected/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-kube-api-access-g7zhc\") pod \"controller-manager-b487f875d-mh5q7\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.661465 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58bf4dd977-zg5lj"] Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.667566 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-58bf4dd977-zg5lj"] Jan 27 12:31:04 crc kubenswrapper[4900]: I0127 12:31:04.941672 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:05 crc kubenswrapper[4900]: I0127 12:31:05.275587 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b487f875d-mh5q7"] Jan 27 12:31:05 crc kubenswrapper[4900]: W0127 12:31:05.287008 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c16cfa4_ecaa_40a9_8ad6_d74691ddf3ed.slice/crio-ee1f06a67ac8078362640580cceca34edd4495e6ce5ebc63db2cad3d7bcb18a8 WatchSource:0}: Error finding container ee1f06a67ac8078362640580cceca34edd4495e6ce5ebc63db2cad3d7bcb18a8: Status 404 returned error can't find the container with id ee1f06a67ac8078362640580cceca34edd4495e6ce5ebc63db2cad3d7bcb18a8 Jan 27 12:31:05 crc kubenswrapper[4900]: I0127 12:31:05.346856 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" event={"ID":"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed","Type":"ContainerStarted","Data":"ee1f06a67ac8078362640580cceca34edd4495e6ce5ebc63db2cad3d7bcb18a8"} Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.040012 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q5sf2"] Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.047952 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q5sf2" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerName="registry-server" containerID="cri-o://6bdcf83296f0fd0cf6e4ff8a0d72376166c2e1abddb85359d63db8a5b40dba14" gracePeriod=2 Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.203167 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n9dph"] Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.205883 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-n9dph" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerName="registry-server" containerID="cri-o://71e2cfc2a809ab7b3df8b70baa8b21d0ebcaf2b6023d922dba2b5a4af66b0289" gracePeriod=2 Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.360129 4900 generic.go:334] "Generic (PLEG): container finished" podID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerID="71e2cfc2a809ab7b3df8b70baa8b21d0ebcaf2b6023d922dba2b5a4af66b0289" exitCode=0 Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.360233 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n9dph" event={"ID":"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57","Type":"ContainerDied","Data":"71e2cfc2a809ab7b3df8b70baa8b21d0ebcaf2b6023d922dba2b5a4af66b0289"} Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.364562 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" event={"ID":"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed","Type":"ContainerStarted","Data":"f778f5949dd28633bad9f16cd51d1c15b206ff65d5535fbceecdbf044040cdc5"} Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.364913 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.376739 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.379462 4900 generic.go:334] "Generic (PLEG): container finished" podID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerID="6bdcf83296f0fd0cf6e4ff8a0d72376166c2e1abddb85359d63db8a5b40dba14" exitCode=0 Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.379566 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5sf2" event={"ID":"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4","Type":"ContainerDied","Data":"6bdcf83296f0fd0cf6e4ff8a0d72376166c2e1abddb85359d63db8a5b40dba14"} Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.441932 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" podStartSLOduration=4.441896514 podStartE2EDuration="4.441896514s" podCreationTimestamp="2026-01-27 12:31:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:31:06.402041103 +0000 UTC m=+293.639069313" watchObservedRunningTime="2026-01-27 12:31:06.441896514 +0000 UTC m=+293.678924724" Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.494535 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29e64c5e-0e69-4c9e-b3fc-948f68647981" path="/var/lib/kubelet/pods/29e64c5e-0e69-4c9e-b3fc-948f68647981/volumes" Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.794956 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.910313 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-catalog-content\") pod \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.910397 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-utilities\") pod \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.910709 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fptvd\" (UniqueName: \"kubernetes.io/projected/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-kube-api-access-fptvd\") pod \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\" (UID: \"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57\") " Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.917234 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-utilities" (OuterVolumeSpecName: "utilities") pod "79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" (UID: "79e4aadf-f1cf-44f9-af87-a3a4b1e06c57"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:31:06 crc kubenswrapper[4900]: I0127 12:31:06.936643 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-kube-api-access-fptvd" (OuterVolumeSpecName: "kube-api-access-fptvd") pod "79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" (UID: "79e4aadf-f1cf-44f9-af87-a3a4b1e06c57"). 
InnerVolumeSpecName "kube-api-access-fptvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.012299 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.012341 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fptvd\" (UniqueName: \"kubernetes.io/projected/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-kube-api-access-fptvd\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.049534 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" (UID: "79e4aadf-f1cf-44f9-af87-a3a4b1e06c57"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.115025 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.226941 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.391232 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q5sf2" event={"ID":"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4","Type":"ContainerDied","Data":"bb29ae9f384ff8c7938771cbe7bf622b000ffb7eddf3e55a066cdd2b98af2d87"} Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.391341 4900 scope.go:117] "RemoveContainer" containerID="6bdcf83296f0fd0cf6e4ff8a0d72376166c2e1abddb85359d63db8a5b40dba14" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.391742 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q5sf2" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.394771 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n9dph" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.394897 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n9dph" event={"ID":"79e4aadf-f1cf-44f9-af87-a3a4b1e06c57","Type":"ContainerDied","Data":"f4a3aab88d8f6b62f491cbd1fe8958ed5fb4f49ed29b3f34bb23f96a3580cfc1"} Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.416012 4900 scope.go:117] "RemoveContainer" containerID="e3af023d7adf7f632efb7e0fd6942143d3c6955f8bbac1a56d859414731e92ce" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.419605 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8582r\" (UniqueName: \"kubernetes.io/projected/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-kube-api-access-8582r\") pod \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.419878 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-catalog-content\") pod \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.419966 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-utilities\") pod \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\" (UID: \"c7ff526b-ed0d-42d2-98bf-2f237d76cbf4\") " Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.422738 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-utilities" (OuterVolumeSpecName: "utilities") pod "c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" (UID: "c7ff526b-ed0d-42d2-98bf-2f237d76cbf4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.447176 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n9dph"] Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.447362 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-kube-api-access-8582r" (OuterVolumeSpecName: "kube-api-access-8582r") pod "c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" (UID: "c7ff526b-ed0d-42d2-98bf-2f237d76cbf4"). InnerVolumeSpecName "kube-api-access-8582r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.454543 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-n9dph"] Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.462680 4900 scope.go:117] "RemoveContainer" containerID="92a1e7095fb608e0ee1b7302815d6ce5a9164e37e26940f243053074313999fa" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.491238 4900 scope.go:117] "RemoveContainer" containerID="71e2cfc2a809ab7b3df8b70baa8b21d0ebcaf2b6023d922dba2b5a4af66b0289" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.506971 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" (UID: "c7ff526b-ed0d-42d2-98bf-2f237d76cbf4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.517345 4900 scope.go:117] "RemoveContainer" containerID="df8c9ca11509273f21cb7cf634f7b7b2d013ce583237c9b00101d565d74afadc" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.522299 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.522340 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.522354 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8582r\" (UniqueName: \"kubernetes.io/projected/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4-kube-api-access-8582r\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.539115 4900 scope.go:117] "RemoveContainer" containerID="c77d204b0d85ef94817a7f2cea935991f21c6df4b85f27cdcfb77778a0556619" Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.735956 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q5sf2"] Jan 27 12:31:07 crc kubenswrapper[4900]: I0127 12:31:07.742698 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q5sf2"] Jan 27 12:31:08 crc kubenswrapper[4900]: I0127 12:31:08.494190 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" path="/var/lib/kubelet/pods/79e4aadf-f1cf-44f9-af87-a3a4b1e06c57/volumes" Jan 27 12:31:08 crc kubenswrapper[4900]: I0127 12:31:08.495281 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" path="/var/lib/kubelet/pods/c7ff526b-ed0d-42d2-98bf-2f237d76cbf4/volumes" Jan 27 12:31:15 crc kubenswrapper[4900]: I0127 12:31:15.594112 4900 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 27 12:31:22 crc kubenswrapper[4900]: I0127 12:31:22.872422 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-b487f875d-mh5q7"] Jan 27 12:31:22 crc kubenswrapper[4900]: I0127 12:31:22.873301 4900 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" podUID="2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" containerName="controller-manager" containerID="cri-o://f778f5949dd28633bad9f16cd51d1c15b206ff65d5535fbceecdbf044040cdc5" gracePeriod=30 Jan 27 12:31:22 crc kubenswrapper[4900]: I0127 12:31:22.960753 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp"] Jan 27 12:31:22 crc kubenswrapper[4900]: I0127 12:31:22.960995 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" podUID="d394107b-ac5e-44aa-81a3-bf7977cc0471" containerName="route-controller-manager" containerID="cri-o://86022ad51511f18924bc4519ebb64e5ccfe862edddf8cebe75173a8497eebbd1" gracePeriod=30 Jan 27 12:31:23 crc kubenswrapper[4900]: I0127 12:31:23.515511 4900 generic.go:334] "Generic (PLEG): container finished" podID="2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" containerID="f778f5949dd28633bad9f16cd51d1c15b206ff65d5535fbceecdbf044040cdc5" exitCode=0 Jan 27 12:31:23 crc kubenswrapper[4900]: I0127 12:31:23.515597 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" event={"ID":"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed","Type":"ContainerDied","Data":"f778f5949dd28633bad9f16cd51d1c15b206ff65d5535fbceecdbf044040cdc5"} Jan 27 12:31:23 crc kubenswrapper[4900]: I0127 12:31:23.517198 4900 generic.go:334] "Generic (PLEG): container finished" podID="d394107b-ac5e-44aa-81a3-bf7977cc0471" containerID="86022ad51511f18924bc4519ebb64e5ccfe862edddf8cebe75173a8497eebbd1" exitCode=0 Jan 27 12:31:23 crc kubenswrapper[4900]: I0127 12:31:23.517258 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" event={"ID":"d394107b-ac5e-44aa-81a3-bf7977cc0471","Type":"ContainerDied","Data":"86022ad51511f18924bc4519ebb64e5ccfe862edddf8cebe75173a8497eebbd1"} Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.319944 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.328755 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356405 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p"] Jan 27 12:31:24 crc kubenswrapper[4900]: E0127 12:31:24.356750 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" containerName="controller-manager" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356773 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" containerName="controller-manager" Jan 27 12:31:24 crc kubenswrapper[4900]: E0127 12:31:24.356788 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerName="extract-content" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356796 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerName="extract-content" Jan 27 12:31:24 crc kubenswrapper[4900]: E0127 12:31:24.356812 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerName="extract-utilities" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356822 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerName="extract-utilities" Jan 27 12:31:24 crc kubenswrapper[4900]: E0127 12:31:24.356841 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerName="extract-content" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356849 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerName="extract-content" Jan 27 12:31:24 crc kubenswrapper[4900]: E0127 12:31:24.356860 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerName="extract-utilities" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356867 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerName="extract-utilities" Jan 27 12:31:24 crc kubenswrapper[4900]: E0127 12:31:24.356881 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerName="registry-server" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356888 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerName="registry-server" Jan 27 12:31:24 crc kubenswrapper[4900]: E0127 12:31:24.356898 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerName="registry-server" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356906 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerName="registry-server" Jan 27 12:31:24 crc kubenswrapper[4900]: E0127 12:31:24.356921 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d394107b-ac5e-44aa-81a3-bf7977cc0471" containerName="route-controller-manager" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.356932 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d394107b-ac5e-44aa-81a3-bf7977cc0471" containerName="route-controller-manager" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 
12:31:24.357096 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7ff526b-ed0d-42d2-98bf-2f237d76cbf4" containerName="registry-server" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.357115 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" containerName="controller-manager" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.357128 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d394107b-ac5e-44aa-81a3-bf7977cc0471" containerName="route-controller-manager" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.357140 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="79e4aadf-f1cf-44f9-af87-a3a4b1e06c57" containerName="registry-server" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.357681 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.382371 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p"] Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487140 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-config\") pod \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487222 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-client-ca\") pod \"d394107b-ac5e-44aa-81a3-bf7977cc0471\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487267 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-proxy-ca-bundles\") pod \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487335 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d394107b-ac5e-44aa-81a3-bf7977cc0471-serving-cert\") pod \"d394107b-ac5e-44aa-81a3-bf7977cc0471\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487383 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-serving-cert\") pod \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487418 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7zhc\" (UniqueName: \"kubernetes.io/projected/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-kube-api-access-g7zhc\") pod \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487495 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsvqk\" (UniqueName: 
\"kubernetes.io/projected/d394107b-ac5e-44aa-81a3-bf7977cc0471-kube-api-access-bsvqk\") pod \"d394107b-ac5e-44aa-81a3-bf7977cc0471\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487520 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-client-ca\") pod \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\" (UID: \"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.487581 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-config\") pod \"d394107b-ac5e-44aa-81a3-bf7977cc0471\" (UID: \"d394107b-ac5e-44aa-81a3-bf7977cc0471\") " Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.488297 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-client-ca" (OuterVolumeSpecName: "client-ca") pod "d394107b-ac5e-44aa-81a3-bf7977cc0471" (UID: "d394107b-ac5e-44aa-81a3-bf7977cc0471"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.488348 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" (UID: "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.488352 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-config" (OuterVolumeSpecName: "config") pod "d394107b-ac5e-44aa-81a3-bf7977cc0471" (UID: "d394107b-ac5e-44aa-81a3-bf7977cc0471"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.488833 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-client-ca\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.488903 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4506f060-72b2-4332-9290-4fc7cde14709-serving-cert\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.489010 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-config\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.489093 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb29z\" (UniqueName: \"kubernetes.io/projected/4506f060-72b2-4332-9290-4fc7cde14709-kube-api-access-fb29z\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.489279 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.489320 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d394107b-ac5e-44aa-81a3-bf7977cc0471-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.489338 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.489878 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-client-ca" (OuterVolumeSpecName: "client-ca") pod "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" (UID: "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.490095 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-config" (OuterVolumeSpecName: "config") pod "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" (UID: "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.494486 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-kube-api-access-g7zhc" (OuterVolumeSpecName: "kube-api-access-g7zhc") pod "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" (UID: "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed"). InnerVolumeSpecName "kube-api-access-g7zhc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.494602 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d394107b-ac5e-44aa-81a3-bf7977cc0471-kube-api-access-bsvqk" (OuterVolumeSpecName: "kube-api-access-bsvqk") pod "d394107b-ac5e-44aa-81a3-bf7977cc0471" (UID: "d394107b-ac5e-44aa-81a3-bf7977cc0471"). InnerVolumeSpecName "kube-api-access-bsvqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.494621 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d394107b-ac5e-44aa-81a3-bf7977cc0471-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d394107b-ac5e-44aa-81a3-bf7977cc0471" (UID: "d394107b-ac5e-44aa-81a3-bf7977cc0471"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.494772 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" (UID: "2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.526075 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.526046 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp" event={"ID":"d394107b-ac5e-44aa-81a3-bf7977cc0471","Type":"ContainerDied","Data":"4d24cfa8647fcd8579daa5c5de7cf0d6e44244b847bf73b0ae8d2a57bd968524"} Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.526232 4900 scope.go:117] "RemoveContainer" containerID="86022ad51511f18924bc4519ebb64e5ccfe862edddf8cebe75173a8497eebbd1" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.529800 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" event={"ID":"2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed","Type":"ContainerDied","Data":"ee1f06a67ac8078362640580cceca34edd4495e6ce5ebc63db2cad3d7bcb18a8"} Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.529836 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-b487f875d-mh5q7" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.550974 4900 scope.go:117] "RemoveContainer" containerID="f778f5949dd28633bad9f16cd51d1c15b206ff65d5535fbceecdbf044040cdc5" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.562828 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp"] Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.570229 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69d97d594-qg6mp"] Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.574533 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-b487f875d-mh5q7"] Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.577648 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-b487f875d-mh5q7"] Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.590827 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb29z\" (UniqueName: \"kubernetes.io/projected/4506f060-72b2-4332-9290-4fc7cde14709-kube-api-access-fb29z\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.590961 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-client-ca\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.590993 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4506f060-72b2-4332-9290-4fc7cde14709-serving-cert\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.591052 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-config\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.591127 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsvqk\" (UniqueName: \"kubernetes.io/projected/d394107b-ac5e-44aa-81a3-bf7977cc0471-kube-api-access-bsvqk\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.591140 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.591151 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-config\") 
on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.591172 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d394107b-ac5e-44aa-81a3-bf7977cc0471-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.591181 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.591190 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7zhc\" (UniqueName: \"kubernetes.io/projected/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed-kube-api-access-g7zhc\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.594290 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-client-ca\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.595484 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-config\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.597630 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4506f060-72b2-4332-9290-4fc7cde14709-serving-cert\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.610336 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb29z\" (UniqueName: \"kubernetes.io/projected/4506f060-72b2-4332-9290-4fc7cde14709-kube-api-access-fb29z\") pod \"route-controller-manager-7cfb44d9cd-qp92p\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:24 crc kubenswrapper[4900]: I0127 12:31:24.683175 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:25 crc kubenswrapper[4900]: I0127 12:31:25.098799 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p"] Jan 27 12:31:25 crc kubenswrapper[4900]: I0127 12:31:25.539501 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" event={"ID":"4506f060-72b2-4332-9290-4fc7cde14709","Type":"ContainerStarted","Data":"61d345101a9deca8906a195cf53724a0d7ebe0f0bf45067a4180edef3b5792c4"} Jan 27 12:31:26 crc kubenswrapper[4900]: I0127 12:31:26.489040 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed" path="/var/lib/kubelet/pods/2c16cfa4-ecaa-40a9-8ad6-d74691ddf3ed/volumes" Jan 27 12:31:26 crc kubenswrapper[4900]: I0127 12:31:26.490448 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d394107b-ac5e-44aa-81a3-bf7977cc0471" path="/var/lib/kubelet/pods/d394107b-ac5e-44aa-81a3-bf7977cc0471/volumes" Jan 27 12:31:26 crc kubenswrapper[4900]: I0127 12:31:26.552762 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" event={"ID":"4506f060-72b2-4332-9290-4fc7cde14709","Type":"ContainerStarted","Data":"f0fcb4aa8c4e18d48df1e3d0281e2e6796dc697dc6b6848fd57b1408b0e73f68"} Jan 27 12:31:26 crc kubenswrapper[4900]: I0127 12:31:26.553131 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:26 crc kubenswrapper[4900]: I0127 12:31:26.561406 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:26 crc kubenswrapper[4900]: I0127 12:31:26.579668 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" podStartSLOduration=4.579637166 podStartE2EDuration="4.579637166s" podCreationTimestamp="2026-01-27 12:31:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:31:26.578999917 +0000 UTC m=+313.816028137" watchObservedRunningTime="2026-01-27 12:31:26.579637166 +0000 UTC m=+313.816665376" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.108986 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-86d754d577-6cn72"] Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.109881 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.112810 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.113125 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.113262 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.114562 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.114986 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.115166 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.132583 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-86d754d577-6cn72"] Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.133238 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.155361 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-config\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.155446 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-proxy-ca-bundles\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.155507 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-serving-cert\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.155541 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpfgk\" (UniqueName: \"kubernetes.io/projected/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-kube-api-access-lpfgk\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.155614 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-client-ca\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.257009 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-proxy-ca-bundles\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.257138 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-serving-cert\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.257172 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpfgk\" (UniqueName: \"kubernetes.io/projected/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-kube-api-access-lpfgk\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.257201 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-client-ca\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.257266 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-config\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.258722 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-client-ca\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.258810 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-config\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.259680 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-proxy-ca-bundles\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " 
pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.265100 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-serving-cert\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.276551 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpfgk\" (UniqueName: \"kubernetes.io/projected/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-kube-api-access-lpfgk\") pod \"controller-manager-86d754d577-6cn72\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.428943 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.726591 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7xgpz"] Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.727679 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.743742 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7xgpz"] Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.847816 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-86d754d577-6cn72"] Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.866801 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qmp6\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-kube-api-access-8qmp6\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.867310 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/286b2993-6426-49f6-8b5a-84a2289602a3-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.867344 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-registry-tls\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.867367 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/286b2993-6426-49f6-8b5a-84a2289602a3-registry-certificates\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.867408 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/286b2993-6426-49f6-8b5a-84a2289602a3-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.867454 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.867493 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/286b2993-6426-49f6-8b5a-84a2289602a3-trusted-ca\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.867517 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-bound-sa-token\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.889746 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.969030 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/286b2993-6426-49f6-8b5a-84a2289602a3-trusted-ca\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.969152 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-bound-sa-token\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.969211 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qmp6\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-kube-api-access-8qmp6\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 
12:31:27.969264 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/286b2993-6426-49f6-8b5a-84a2289602a3-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.969292 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-registry-tls\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.969312 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/286b2993-6426-49f6-8b5a-84a2289602a3-registry-certificates\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.969343 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/286b2993-6426-49f6-8b5a-84a2289602a3-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.971037 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/286b2993-6426-49f6-8b5a-84a2289602a3-ca-trust-extracted\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.971047 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/286b2993-6426-49f6-8b5a-84a2289602a3-trusted-ca\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.971928 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/286b2993-6426-49f6-8b5a-84a2289602a3-registry-certificates\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.977026 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/286b2993-6426-49f6-8b5a-84a2289602a3-installation-pull-secrets\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.977249 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-registry-tls\") pod 
\"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.988383 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qmp6\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-kube-api-access-8qmp6\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:27 crc kubenswrapper[4900]: I0127 12:31:27.991773 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/286b2993-6426-49f6-8b5a-84a2289602a3-bound-sa-token\") pod \"image-registry-66df7c8f76-7xgpz\" (UID: \"286b2993-6426-49f6-8b5a-84a2289602a3\") " pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:28 crc kubenswrapper[4900]: I0127 12:31:28.046988 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:28 crc kubenswrapper[4900]: I0127 12:31:28.271848 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-7xgpz"] Jan 27 12:31:28 crc kubenswrapper[4900]: I0127 12:31:28.566897 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" event={"ID":"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a","Type":"ContainerStarted","Data":"0205ca09f928b8bbf73c129a9b2da231b28cb926312d4817757a999ada73d33c"} Jan 27 12:31:28 crc kubenswrapper[4900]: I0127 12:31:28.566954 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" event={"ID":"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a","Type":"ContainerStarted","Data":"4cfc10c7e7b248e54ac2da416bae1666d866f2cef3de2b4a60dd88fe5c3a38d8"} Jan 27 12:31:28 crc kubenswrapper[4900]: I0127 12:31:28.568569 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" event={"ID":"286b2993-6426-49f6-8b5a-84a2289602a3","Type":"ContainerStarted","Data":"0e5b46548b16c2ec6ddc6359f2fece3708669c6bd5253b2ffdc96ac799f271b3"} Jan 27 12:31:29 crc kubenswrapper[4900]: I0127 12:31:29.578313 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" event={"ID":"286b2993-6426-49f6-8b5a-84a2289602a3","Type":"ContainerStarted","Data":"83fbc43e16738d7548c765504fdcf762e50c3eb8de8bc909198ea0800b2289ae"} Jan 27 12:31:29 crc kubenswrapper[4900]: I0127 12:31:29.578825 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:29 crc kubenswrapper[4900]: I0127 12:31:29.585589 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:29 crc kubenswrapper[4900]: I0127 12:31:29.602134 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" podStartSLOduration=7.602099014 podStartE2EDuration="7.602099014s" podCreationTimestamp="2026-01-27 12:31:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-27 12:31:29.596356574 +0000 UTC m=+316.833384794" watchObservedRunningTime="2026-01-27 12:31:29.602099014 +0000 UTC m=+316.839127224" Jan 27 12:31:29 crc kubenswrapper[4900]: I0127 12:31:29.624527 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" podStartSLOduration=2.624498398 podStartE2EDuration="2.624498398s" podCreationTimestamp="2026-01-27 12:31:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:31:29.618011956 +0000 UTC m=+316.855040176" watchObservedRunningTime="2026-01-27 12:31:29.624498398 +0000 UTC m=+316.861526608" Jan 27 12:31:30 crc kubenswrapper[4900]: I0127 12:31:30.585767 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:42 crc kubenswrapper[4900]: I0127 12:31:42.841261 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-86d754d577-6cn72"] Jan 27 12:31:42 crc kubenswrapper[4900]: I0127 12:31:42.842394 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" podUID="0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" containerName="controller-manager" containerID="cri-o://0205ca09f928b8bbf73c129a9b2da231b28cb926312d4817757a999ada73d33c" gracePeriod=30 Jan 27 12:31:42 crc kubenswrapper[4900]: I0127 12:31:42.875988 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p"] Jan 27 12:31:42 crc kubenswrapper[4900]: I0127 12:31:42.876254 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" podUID="4506f060-72b2-4332-9290-4fc7cde14709" containerName="route-controller-manager" containerID="cri-o://f0fcb4aa8c4e18d48df1e3d0281e2e6796dc697dc6b6848fd57b1408b0e73f68" gracePeriod=30 Jan 27 12:31:43 crc kubenswrapper[4900]: I0127 12:31:43.085224 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wndtt"] Jan 27 12:31:43 crc kubenswrapper[4900]: I0127 12:31:43.678391 4900 generic.go:334] "Generic (PLEG): container finished" podID="0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" containerID="0205ca09f928b8bbf73c129a9b2da231b28cb926312d4817757a999ada73d33c" exitCode=0 Jan 27 12:31:43 crc kubenswrapper[4900]: I0127 12:31:43.678588 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" event={"ID":"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a","Type":"ContainerDied","Data":"0205ca09f928b8bbf73c129a9b2da231b28cb926312d4817757a999ada73d33c"} Jan 27 12:31:43 crc kubenswrapper[4900]: I0127 12:31:43.685082 4900 generic.go:334] "Generic (PLEG): container finished" podID="4506f060-72b2-4332-9290-4fc7cde14709" containerID="f0fcb4aa8c4e18d48df1e3d0281e2e6796dc697dc6b6848fd57b1408b0e73f68" exitCode=0 Jan 27 12:31:43 crc kubenswrapper[4900]: I0127 12:31:43.685134 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" event={"ID":"4506f060-72b2-4332-9290-4fc7cde14709","Type":"ContainerDied","Data":"f0fcb4aa8c4e18d48df1e3d0281e2e6796dc697dc6b6848fd57b1408b0e73f68"} Jan 27 12:31:44 crc 
kubenswrapper[4900]: I0127 12:31:44.048991 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.058313 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.104708 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8"] Jan 27 12:31:44 crc kubenswrapper[4900]: E0127 12:31:44.105073 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" containerName="controller-manager" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.105092 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" containerName="controller-manager" Jan 27 12:31:44 crc kubenswrapper[4900]: E0127 12:31:44.105109 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4506f060-72b2-4332-9290-4fc7cde14709" containerName="route-controller-manager" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.105118 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4506f060-72b2-4332-9290-4fc7cde14709" containerName="route-controller-manager" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.105276 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="4506f060-72b2-4332-9290-4fc7cde14709" containerName="route-controller-manager" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.105299 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" containerName="controller-manager" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.105792 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.139863 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpfgk\" (UniqueName: \"kubernetes.io/projected/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-kube-api-access-lpfgk\") pod \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.139958 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-config\") pod \"4506f060-72b2-4332-9290-4fc7cde14709\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.140018 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-proxy-ca-bundles\") pod \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.140103 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb29z\" (UniqueName: \"kubernetes.io/projected/4506f060-72b2-4332-9290-4fc7cde14709-kube-api-access-fb29z\") pod \"4506f060-72b2-4332-9290-4fc7cde14709\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.140199 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-client-ca\") pod \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.140233 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4506f060-72b2-4332-9290-4fc7cde14709-serving-cert\") pod \"4506f060-72b2-4332-9290-4fc7cde14709\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.140291 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-config\") pod \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.140362 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-client-ca\") pod \"4506f060-72b2-4332-9290-4fc7cde14709\" (UID: \"4506f060-72b2-4332-9290-4fc7cde14709\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.140402 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-serving-cert\") pod \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\" (UID: \"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a\") " Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.141946 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-client-ca" (OuterVolumeSpecName: "client-ca") pod "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" 
(UID: "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.141972 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" (UID: "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.142106 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-config" (OuterVolumeSpecName: "config") pod "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" (UID: "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.143222 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-client-ca" (OuterVolumeSpecName: "client-ca") pod "4506f060-72b2-4332-9290-4fc7cde14709" (UID: "4506f060-72b2-4332-9290-4fc7cde14709"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.146384 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-config" (OuterVolumeSpecName: "config") pod "4506f060-72b2-4332-9290-4fc7cde14709" (UID: "4506f060-72b2-4332-9290-4fc7cde14709"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.158534 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8"] Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.165344 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" (UID: "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.167446 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-kube-api-access-lpfgk" (OuterVolumeSpecName: "kube-api-access-lpfgk") pod "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" (UID: "0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a"). InnerVolumeSpecName "kube-api-access-lpfgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.167591 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4506f060-72b2-4332-9290-4fc7cde14709-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4506f060-72b2-4332-9290-4fc7cde14709" (UID: "4506f060-72b2-4332-9290-4fc7cde14709"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.167562 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4506f060-72b2-4332-9290-4fc7cde14709-kube-api-access-fb29z" (OuterVolumeSpecName: "kube-api-access-fb29z") pod "4506f060-72b2-4332-9290-4fc7cde14709" (UID: "4506f060-72b2-4332-9290-4fc7cde14709"). InnerVolumeSpecName "kube-api-access-fb29z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.242658 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af34c04d-2e4e-47f7-919b-5b56554880dc-client-ca\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243276 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af34c04d-2e4e-47f7-919b-5b56554880dc-config\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243318 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af34c04d-2e4e-47f7-919b-5b56554880dc-serving-cert\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243356 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c58db\" (UniqueName: \"kubernetes.io/projected/af34c04d-2e4e-47f7-919b-5b56554880dc-kube-api-access-c58db\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243471 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4506f060-72b2-4332-9290-4fc7cde14709-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243493 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243505 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243517 4900 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243531 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpfgk\" (UniqueName: 
\"kubernetes.io/projected/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-kube-api-access-lpfgk\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243542 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4506f060-72b2-4332-9290-4fc7cde14709-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243559 4900 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243571 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb29z\" (UniqueName: \"kubernetes.io/projected/4506f060-72b2-4332-9290-4fc7cde14709-kube-api-access-fb29z\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.243583 4900 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.345466 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af34c04d-2e4e-47f7-919b-5b56554880dc-client-ca\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.345857 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af34c04d-2e4e-47f7-919b-5b56554880dc-config\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.345974 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af34c04d-2e4e-47f7-919b-5b56554880dc-serving-cert\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.347231 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af34c04d-2e4e-47f7-919b-5b56554880dc-client-ca\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.347463 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c58db\" (UniqueName: \"kubernetes.io/projected/af34c04d-2e4e-47f7-919b-5b56554880dc-kube-api-access-c58db\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.347562 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/af34c04d-2e4e-47f7-919b-5b56554880dc-config\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.359218 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af34c04d-2e4e-47f7-919b-5b56554880dc-serving-cert\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.371834 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c58db\" (UniqueName: \"kubernetes.io/projected/af34c04d-2e4e-47f7-919b-5b56554880dc-kube-api-access-c58db\") pod \"route-controller-manager-7b4c94f6f7-5v8z8\" (UID: \"af34c04d-2e4e-47f7-919b-5b56554880dc\") " pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.448299 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.697281 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.697879 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86d754d577-6cn72" event={"ID":"0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a","Type":"ContainerDied","Data":"4cfc10c7e7b248e54ac2da416bae1666d866f2cef3de2b4a60dd88fe5c3a38d8"} Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.697927 4900 scope.go:117] "RemoveContainer" containerID="0205ca09f928b8bbf73c129a9b2da231b28cb926312d4817757a999ada73d33c" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.701327 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" event={"ID":"4506f060-72b2-4332-9290-4fc7cde14709","Type":"ContainerDied","Data":"61d345101a9deca8906a195cf53724a0d7ebe0f0bf45067a4180edef3b5792c4"} Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.701418 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.717018 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-86d754d577-6cn72"] Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.721803 4900 scope.go:117] "RemoveContainer" containerID="f0fcb4aa8c4e18d48df1e3d0281e2e6796dc697dc6b6848fd57b1408b0e73f68" Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.725657 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-86d754d577-6cn72"] Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.741927 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p"] Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.746085 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7cfb44d9cd-qp92p"] Jan 27 12:31:44 crc kubenswrapper[4900]: I0127 12:31:44.894795 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8"] Jan 27 12:31:44 crc kubenswrapper[4900]: W0127 12:31:44.903357 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf34c04d_2e4e_47f7_919b_5b56554880dc.slice/crio-e51d92cf57c75d8c3d8ec04c79ef82c225e2e852517faff95dc121265d3ee05b WatchSource:0}: Error finding container e51d92cf57c75d8c3d8ec04c79ef82c225e2e852517faff95dc121265d3ee05b: Status 404 returned error can't find the container with id e51d92cf57c75d8c3d8ec04c79ef82c225e2e852517faff95dc121265d3ee05b Jan 27 12:31:45 crc kubenswrapper[4900]: I0127 12:31:45.709433 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" event={"ID":"af34c04d-2e4e-47f7-919b-5b56554880dc","Type":"ContainerStarted","Data":"b30dd8bf02f344b723a3495fa66d536ba9a7ec093b091a8abbd6fa5edee17924"} Jan 27 12:31:45 crc kubenswrapper[4900]: I0127 12:31:45.709859 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:45 crc kubenswrapper[4900]: I0127 12:31:45.709873 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" event={"ID":"af34c04d-2e4e-47f7-919b-5b56554880dc","Type":"ContainerStarted","Data":"e51d92cf57c75d8c3d8ec04c79ef82c225e2e852517faff95dc121265d3ee05b"} Jan 27 12:31:45 crc kubenswrapper[4900]: I0127 12:31:45.714898 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 12:31:45 crc kubenswrapper[4900]: I0127 12:31:45.732131 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podStartSLOduration=3.732107446 podStartE2EDuration="3.732107446s" podCreationTimestamp="2026-01-27 12:31:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:31:45.728908581 +0000 UTC m=+332.965936791" watchObservedRunningTime="2026-01-27 12:31:45.732107446 +0000 UTC 
m=+332.969135656" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.124721 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-866c485c84-8p95x"] Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.125679 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.129248 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.129418 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.129700 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.130363 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.130879 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.133847 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.140173 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.141426 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-866c485c84-8p95x"] Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.286998 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-config\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.287428 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdb7v\" (UniqueName: \"kubernetes.io/projected/bffb26f0-0279-4ad6-ba95-920c7a358068-kube-api-access-xdb7v\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.287508 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bffb26f0-0279-4ad6-ba95-920c7a358068-serving-cert\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.287563 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-client-ca\") pod \"controller-manager-866c485c84-8p95x\" (UID: 
\"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.287586 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-proxy-ca-bundles\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.389277 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bffb26f0-0279-4ad6-ba95-920c7a358068-serving-cert\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.389415 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-client-ca\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.389450 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-proxy-ca-bundles\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.389495 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-config\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.389536 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdb7v\" (UniqueName: \"kubernetes.io/projected/bffb26f0-0279-4ad6-ba95-920c7a358068-kube-api-access-xdb7v\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.390778 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-client-ca\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.390890 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-proxy-ca-bundles\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.391378 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bffb26f0-0279-4ad6-ba95-920c7a358068-config\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.397929 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bffb26f0-0279-4ad6-ba95-920c7a358068-serving-cert\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.411303 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdb7v\" (UniqueName: \"kubernetes.io/projected/bffb26f0-0279-4ad6-ba95-920c7a358068-kube-api-access-xdb7v\") pod \"controller-manager-866c485c84-8p95x\" (UID: \"bffb26f0-0279-4ad6-ba95-920c7a358068\") " pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.440573 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.497782 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a" path="/var/lib/kubelet/pods/0ed69c43-55f1-4a5a-ab33-89a20d8bbf8a/volumes" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.498530 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4506f060-72b2-4332-9290-4fc7cde14709" path="/var/lib/kubelet/pods/4506f060-72b2-4332-9290-4fc7cde14709/volumes" Jan 27 12:31:46 crc kubenswrapper[4900]: I0127 12:31:46.863773 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-866c485c84-8p95x"] Jan 27 12:31:46 crc kubenswrapper[4900]: W0127 12:31:46.873822 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbffb26f0_0279_4ad6_ba95_920c7a358068.slice/crio-897046e86f61dc3e725d53418b05d959bcd154e491606de0008a5bc1e240f79c WatchSource:0}: Error finding container 897046e86f61dc3e725d53418b05d959bcd154e491606de0008a5bc1e240f79c: Status 404 returned error can't find the container with id 897046e86f61dc3e725d53418b05d959bcd154e491606de0008a5bc1e240f79c Jan 27 12:31:47 crc kubenswrapper[4900]: I0127 12:31:47.726896 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" event={"ID":"bffb26f0-0279-4ad6-ba95-920c7a358068","Type":"ContainerStarted","Data":"83260d35c539356d18080818b54f5518d5f43ec2c8600fdc94fc6d9d1b832c94"} Jan 27 12:31:47 crc kubenswrapper[4900]: I0127 12:31:47.729628 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" event={"ID":"bffb26f0-0279-4ad6-ba95-920c7a358068","Type":"ContainerStarted","Data":"897046e86f61dc3e725d53418b05d959bcd154e491606de0008a5bc1e240f79c"} Jan 27 12:31:47 crc kubenswrapper[4900]: I0127 12:31:47.729698 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:47 crc kubenswrapper[4900]: I0127 12:31:47.732446 
4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 12:31:47 crc kubenswrapper[4900]: I0127 12:31:47.748916 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podStartSLOduration=5.748893567 podStartE2EDuration="5.748893567s" podCreationTimestamp="2026-01-27 12:31:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:31:47.745778105 +0000 UTC m=+334.982806315" watchObservedRunningTime="2026-01-27 12:31:47.748893567 +0000 UTC m=+334.985921777" Jan 27 12:31:48 crc kubenswrapper[4900]: I0127 12:31:48.053946 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" Jan 27 12:31:48 crc kubenswrapper[4900]: I0127 12:31:48.114568 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cr4gz"] Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.118752 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" podUID="2b4fdd15-bb83-4db3-bf15-2101476b4000" containerName="oauth-openshift" containerID="cri-o://50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b" gracePeriod=15 Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.624907 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.660580 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5"] Jan 27 12:32:08 crc kubenswrapper[4900]: E0127 12:32:08.660866 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b4fdd15-bb83-4db3-bf15-2101476b4000" containerName="oauth-openshift" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.660882 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b4fdd15-bb83-4db3-bf15-2101476b4000" containerName="oauth-openshift" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.660979 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b4fdd15-bb83-4db3-bf15-2101476b4000" containerName="oauth-openshift" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.661385 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.677773 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgmzb\" (UniqueName: \"kubernetes.io/projected/2b4fdd15-bb83-4db3-bf15-2101476b4000-kube-api-access-cgmzb\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.677899 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-session\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678107 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-router-certs\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678150 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678183 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-error\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678223 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-service-ca\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678243 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-login\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678282 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: 
\"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678312 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-audit-policies\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678336 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr7n5\" (UniqueName: \"kubernetes.io/projected/88330a90-8030-489a-898c-2690958a1a8e-kube-api-access-cr7n5\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678357 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678378 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678432 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/88330a90-8030-489a-898c-2690958a1a8e-audit-dir\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678457 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678484 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-session\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.678529 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.681932 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5"] Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.699633 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.700811 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b4fdd15-bb83-4db3-bf15-2101476b4000-kube-api-access-cgmzb" (OuterVolumeSpecName: "kube-api-access-cgmzb") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "kube-api-access-cgmzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779356 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-login\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779410 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-dir\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779434 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-error\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779454 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-idp-0-file-data\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779478 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-ocp-branding-template\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779496 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-trusted-ca-bundle\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779516 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-policies\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779543 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-service-ca\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779563 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-cliconfig\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779609 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-router-certs\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779643 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-serving-cert\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779667 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-provider-selection\") pod \"2b4fdd15-bb83-4db3-bf15-2101476b4000\" (UID: \"2b4fdd15-bb83-4db3-bf15-2101476b4000\") " Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779762 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779786 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-audit-policies\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779801 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr7n5\" (UniqueName: 
\"kubernetes.io/projected/88330a90-8030-489a-898c-2690958a1a8e-kube-api-access-cr7n5\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779817 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779840 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779873 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/88330a90-8030-489a-898c-2690958a1a8e-audit-dir\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779891 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779908 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-session\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779932 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779949 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-router-certs\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779968 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.779989 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-error\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.780022 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-service-ca\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.780040 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-login\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.780092 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgmzb\" (UniqueName: \"kubernetes.io/projected/2b4fdd15-bb83-4db3-bf15-2101476b4000-kube-api-access-cgmzb\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.780106 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.780260 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.780765 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.781152 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.782153 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.782234 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.782590 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-service-ca\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.782643 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.782738 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/88330a90-8030-489a-898c-2690958a1a8e-audit-dir\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.782802 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-audit-policies\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.784775 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.784803 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-error\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.785539 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.785680 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.785838 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.786130 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-template-login\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.786373 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.786478 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.786552 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.786752 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.786806 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-session\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.786851 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-router-certs\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.787034 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.787813 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.788137 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/88330a90-8030-489a-898c-2690958a1a8e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.789043 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "2b4fdd15-bb83-4db3-bf15-2101476b4000" (UID: "2b4fdd15-bb83-4db3-bf15-2101476b4000"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.796932 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr7n5\" (UniqueName: \"kubernetes.io/projected/88330a90-8030-489a-898c-2690958a1a8e-kube-api-access-cr7n5\") pod \"oauth-openshift-6d4d98fcc6-f4gd5\" (UID: \"88330a90-8030-489a-898c-2690958a1a8e\") " pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.875355 4900 generic.go:334] "Generic (PLEG): container finished" podID="2b4fdd15-bb83-4db3-bf15-2101476b4000" containerID="50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b" exitCode=0 Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.875415 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" event={"ID":"2b4fdd15-bb83-4db3-bf15-2101476b4000","Type":"ContainerDied","Data":"50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b"} Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.875439 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.875464 4900 scope.go:117] "RemoveContainer" containerID="50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.875449 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-wndtt" event={"ID":"2b4fdd15-bb83-4db3-bf15-2101476b4000","Type":"ContainerDied","Data":"a699391860187feb1790c0c70ae02876e10bae3ae6add88c7d931a4f58bb2f78"} Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.880560 4900 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.880686 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.880794 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.880890 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.880955 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.881044 4900 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc 
kubenswrapper[4900]: I0127 12:32:08.881143 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.881223 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.881309 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.881381 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.881452 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.881539 4900 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2b4fdd15-bb83-4db3-bf15-2101476b4000-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.898335 4900 scope.go:117] "RemoveContainer" containerID="50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b" Jan 27 12:32:08 crc kubenswrapper[4900]: E0127 12:32:08.901021 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b\": container with ID starting with 50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b not found: ID does not exist" containerID="50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.901179 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b"} err="failed to get container status \"50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b\": rpc error: code = NotFound desc = could not find container \"50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b\": container with ID starting with 50698a026c6ee84760b341e0a81cc1a1231b392961d0d83e6345aa615ac6874b not found: ID does not exist" Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.903218 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wndtt"] Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.907075 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wndtt"] Jan 27 12:32:08 crc kubenswrapper[4900]: I0127 12:32:08.983935 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:09 crc kubenswrapper[4900]: I0127 12:32:09.373758 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5"] Jan 27 12:32:09 crc kubenswrapper[4900]: I0127 12:32:09.883528 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" event={"ID":"88330a90-8030-489a-898c-2690958a1a8e","Type":"ContainerStarted","Data":"99b7678c82d77089ad530dce58e1c26fc00601dda0d1d842420d2dfbee4caa76"} Jan 27 12:32:09 crc kubenswrapper[4900]: I0127 12:32:09.884014 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:09 crc kubenswrapper[4900]: I0127 12:32:09.884035 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" event={"ID":"88330a90-8030-489a-898c-2690958a1a8e","Type":"ContainerStarted","Data":"0921a3ff429e02813f2901ce5be8bbb71f89fa3981989904db6356cf5bac58bc"} Jan 27 12:32:09 crc kubenswrapper[4900]: I0127 12:32:09.910421 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podStartSLOduration=26.910395728 podStartE2EDuration="26.910395728s" podCreationTimestamp="2026-01-27 12:31:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:32:09.90232644 +0000 UTC m=+357.139354670" watchObservedRunningTime="2026-01-27 12:32:09.910395728 +0000 UTC m=+357.147423938" Jan 27 12:32:10 crc kubenswrapper[4900]: I0127 12:32:10.014712 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 12:32:10 crc kubenswrapper[4900]: I0127 12:32:10.493342 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b4fdd15-bb83-4db3-bf15-2101476b4000" path="/var/lib/kubelet/pods/2b4fdd15-bb83-4db3-bf15-2101476b4000/volumes" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.171601 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" podUID="96450fdc-fc86-4fc2-a04d-cfe29f04aa18" containerName="registry" containerID="cri-o://40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7" gracePeriod=30 Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.723245 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.879799 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-ca-trust-extracted\") pod \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.879920 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-tls\") pod \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.880203 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.880613 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-certificates\") pod \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.880680 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-trusted-ca\") pod \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.880734 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mthgq\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-kube-api-access-mthgq\") pod \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.881598 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "96450fdc-fc86-4fc2-a04d-cfe29f04aa18" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.881753 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-bound-sa-token\") pod \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.881740 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "96450fdc-fc86-4fc2-a04d-cfe29f04aa18" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.882252 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-installation-pull-secrets\") pod \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\" (UID: \"96450fdc-fc86-4fc2-a04d-cfe29f04aa18\") " Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.882663 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.882686 4900 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.886914 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-kube-api-access-mthgq" (OuterVolumeSpecName: "kube-api-access-mthgq") pod "96450fdc-fc86-4fc2-a04d-cfe29f04aa18" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18"). InnerVolumeSpecName "kube-api-access-mthgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.891497 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "96450fdc-fc86-4fc2-a04d-cfe29f04aa18" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.891963 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "96450fdc-fc86-4fc2-a04d-cfe29f04aa18" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.892002 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "96450fdc-fc86-4fc2-a04d-cfe29f04aa18" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.896138 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "96450fdc-fc86-4fc2-a04d-cfe29f04aa18" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.897361 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "96450fdc-fc86-4fc2-a04d-cfe29f04aa18" (UID: "96450fdc-fc86-4fc2-a04d-cfe29f04aa18"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.908399 4900 generic.go:334] "Generic (PLEG): container finished" podID="96450fdc-fc86-4fc2-a04d-cfe29f04aa18" containerID="40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7" exitCode=0 Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.908441 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" event={"ID":"96450fdc-fc86-4fc2-a04d-cfe29f04aa18","Type":"ContainerDied","Data":"40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7"} Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.908468 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.908480 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-cr4gz" event={"ID":"96450fdc-fc86-4fc2-a04d-cfe29f04aa18","Type":"ContainerDied","Data":"98ed5745a98fb92854854e59b2c63df8354947b8e5e23d6dd7ad9d7e747d8ae5"} Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.908529 4900 scope.go:117] "RemoveContainer" containerID="40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.949178 4900 scope.go:117] "RemoveContainer" containerID="40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7" Jan 27 12:32:13 crc kubenswrapper[4900]: E0127 12:32:13.949607 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7\": container with ID starting with 40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7 not found: ID does not exist" containerID="40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.949654 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7"} err="failed to get container status \"40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7\": rpc error: code = NotFound desc = could not find container \"40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7\": container with ID starting with 40afbebb1483efe545203adc853e830beec6ce509567b405405a7831845ecef7 not found: ID does not exist" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.969223 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cr4gz"] Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.972100 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-cr4gz"] Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.984371 4900 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" 
(UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.984444 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mthgq\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-kube-api-access-mthgq\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.984458 4900 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.984472 4900 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:13 crc kubenswrapper[4900]: I0127 12:32:13.984484 4900 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/96450fdc-fc86-4fc2-a04d-cfe29f04aa18-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:14 crc kubenswrapper[4900]: I0127 12:32:14.491536 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96450fdc-fc86-4fc2-a04d-cfe29f04aa18" path="/var/lib/kubelet/pods/96450fdc-fc86-4fc2-a04d-cfe29f04aa18/volumes" Jan 27 12:32:22 crc kubenswrapper[4900]: I0127 12:32:22.372453 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:32:22 crc kubenswrapper[4900]: I0127 12:32:22.373258 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.915453 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-grrcf"] Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.919270 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-grrcf" podUID="754ddb47-7891-4142-b52c-98a29ca40078" containerName="registry-server" containerID="cri-o://e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1" gracePeriod=30 Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.939222 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jvcrt"] Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.939533 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jvcrt" podUID="86867c21-fd52-44e7-85d2-87da48322397" containerName="registry-server" containerID="cri-o://a5338c72764cd468a9a4699ae43a19077c903f169068a51d6494b6f9cb03da8e" gracePeriod=30 Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.947885 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8kcp8"] Jan 27 12:32:28 crc kubenswrapper[4900]: 
I0127 12:32:28.951580 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" containerID="cri-o://a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f" gracePeriod=30 Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.964620 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r6fg7"] Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.965130 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-r6fg7" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="registry-server" containerID="cri-o://9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122" gracePeriod=30 Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.972422 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9vrn8"] Jan 27 12:32:28 crc kubenswrapper[4900]: E0127 12:32:28.972855 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96450fdc-fc86-4fc2-a04d-cfe29f04aa18" containerName="registry" Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.972880 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="96450fdc-fc86-4fc2-a04d-cfe29f04aa18" containerName="registry" Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.973006 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="96450fdc-fc86-4fc2-a04d-cfe29f04aa18" containerName="registry" Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.973542 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.984903 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9s4cn"] Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.985237 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9s4cn" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="registry-server" containerID="cri-o://4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff" gracePeriod=30 Jan 27 12:32:28 crc kubenswrapper[4900]: I0127 12:32:28.988829 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9vrn8"] Jan 27 12:32:29 crc kubenswrapper[4900]: E0127 12:32:29.013760 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122 is running failed: container process not found" containerID="9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 12:32:29 crc kubenswrapper[4900]: E0127 12:32:29.016669 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122 is running failed: container process not found" containerID="9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 12:32:29 crc kubenswrapper[4900]: E0127 12:32:29.017383 4900 
log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122 is running failed: container process not found" containerID="9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 12:32:29 crc kubenswrapper[4900]: E0127 12:32:29.017475 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-r6fg7" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="registry-server" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.149537 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96ntn\" (UniqueName: \"kubernetes.io/projected/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-kube-api-access-96ntn\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.149671 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.149728 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.250664 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.250758 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.250805 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96ntn\" (UniqueName: \"kubernetes.io/projected/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-kube-api-access-96ntn\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: 
I0127 12:32:29.252544 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.261580 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.268956 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96ntn\" (UniqueName: \"kubernetes.io/projected/54ddde5c-b5ea-47c1-8ef5-f697d7319c6b-kube-api-access-96ntn\") pod \"marketplace-operator-79b997595-9vrn8\" (UID: \"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b\") " pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.296119 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.760014 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9vrn8"] Jan 27 12:32:29 crc kubenswrapper[4900]: W0127 12:32:29.789812 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54ddde5c_b5ea_47c1_8ef5_f697d7319c6b.slice/crio-93ab6b05b7fd0e3c3ef9525fe58db3853ff10d2a3c9f6062e584135c0ceb6ef4 WatchSource:0}: Error finding container 93ab6b05b7fd0e3c3ef9525fe58db3853ff10d2a3c9f6062e584135c0ceb6ef4: Status 404 returned error can't find the container with id 93ab6b05b7fd0e3c3ef9525fe58db3853ff10d2a3c9f6062e584135c0ceb6ef4 Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.897955 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:32:29 crc kubenswrapper[4900]: E0127 12:32:29.937898 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff is running failed: container process not found" containerID="4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 12:32:29 crc kubenswrapper[4900]: E0127 12:32:29.938720 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff is running failed: container process not found" containerID="4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 12:32:29 crc kubenswrapper[4900]: E0127 12:32:29.938989 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff is running failed: container process not found" containerID="4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 12:32:29 crc kubenswrapper[4900]: E0127 12:32:29.939023 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-9s4cn" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="registry-server" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.979748 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:32:29 crc kubenswrapper[4900]: I0127 12:32:29.998601 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r6fg7" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.027509 4900 generic.go:334] "Generic (PLEG): container finished" podID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerID="9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122" exitCode=0 Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.027626 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r6fg7" event={"ID":"354400fa-dc6c-4435-b9cf-09b5e76a6ef2","Type":"ContainerDied","Data":"9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.027666 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r6fg7" event={"ID":"354400fa-dc6c-4435-b9cf-09b5e76a6ef2","Type":"ContainerDied","Data":"ba9ddd28898cfb8b02b0b3e55804b629fa31eb7b967eba78d143cad64eeb2d52"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.027693 4900 scope.go:117] "RemoveContainer" containerID="9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.027872 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r6fg7" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.031804 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" event={"ID":"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b","Type":"ContainerStarted","Data":"f1ce5f65b96fe1b16a166764c8f88fde403e57d56c45df72267c3662db60b46f"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.031840 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" event={"ID":"54ddde5c-b5ea-47c1-8ef5-f697d7319c6b","Type":"ContainerStarted","Data":"93ab6b05b7fd0e3c3ef9525fe58db3853ff10d2a3c9f6062e584135c0ceb6ef4"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.032574 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.034388 4900 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-9vrn8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.69:8080/healthz\": dial tcp 10.217.0.69:8080: connect: connection refused" start-of-body= Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.034439 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" podUID="54ddde5c-b5ea-47c1-8ef5-f697d7319c6b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.69:8080/healthz\": dial tcp 10.217.0.69:8080: connect: connection refused" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.035134 4900 generic.go:334] "Generic (PLEG): container finished" podID="86867c21-fd52-44e7-85d2-87da48322397" containerID="a5338c72764cd468a9a4699ae43a19077c903f169068a51d6494b6f9cb03da8e" exitCode=0 Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.035205 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jvcrt" event={"ID":"86867c21-fd52-44e7-85d2-87da48322397","Type":"ContainerDied","Data":"a5338c72764cd468a9a4699ae43a19077c903f169068a51d6494b6f9cb03da8e"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.046758 4900 generic.go:334] "Generic (PLEG): container finished" podID="0c526262-6ee4-4526-91d7-614b3cd91082" containerID="a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f" exitCode=0 Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.046855 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" event={"ID":"0c526262-6ee4-4526-91d7-614b3cd91082","Type":"ContainerDied","Data":"a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.046891 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" event={"ID":"0c526262-6ee4-4526-91d7-614b3cd91082","Type":"ContainerDied","Data":"2de967c1c9520900044bf0ef2256ff472c0eedb60be4f81e5890c1e28c42cee8"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.046996 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8kcp8" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.050288 4900 generic.go:334] "Generic (PLEG): container finished" podID="754ddb47-7891-4142-b52c-98a29ca40078" containerID="e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1" exitCode=0 Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.050401 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrcf" event={"ID":"754ddb47-7891-4142-b52c-98a29ca40078","Type":"ContainerDied","Data":"e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.050438 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-grrcf" event={"ID":"754ddb47-7891-4142-b52c-98a29ca40078","Type":"ContainerDied","Data":"e7c571e5811a9fc1786cb8745b22a4661ccd0841055dbf721f9420401aa46a43"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.050538 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-grrcf" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.054113 4900 generic.go:334] "Generic (PLEG): container finished" podID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerID="4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff" exitCode=0 Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.054207 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s4cn" event={"ID":"93a3d7a3-dddf-49e3-be5b-5369108a5e13","Type":"ContainerDied","Data":"4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff"} Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.061651 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-catalog-content\") pod \"754ddb47-7891-4142-b52c-98a29ca40078\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.063643 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-utilities\") pod \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.064072 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-operator-metrics\") pod \"0c526262-6ee4-4526-91d7-614b3cd91082\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.064118 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wn6nj\" (UniqueName: \"kubernetes.io/projected/754ddb47-7891-4142-b52c-98a29ca40078-kube-api-access-wn6nj\") pod \"754ddb47-7891-4142-b52c-98a29ca40078\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.064146 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-utilities\") pod \"754ddb47-7891-4142-b52c-98a29ca40078\" (UID: \"754ddb47-7891-4142-b52c-98a29ca40078\") " 
Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.064195 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdhgd\" (UniqueName: \"kubernetes.io/projected/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-kube-api-access-rdhgd\") pod \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.064222 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9tvq\" (UniqueName: \"kubernetes.io/projected/0c526262-6ee4-4526-91d7-614b3cd91082-kube-api-access-p9tvq\") pod \"0c526262-6ee4-4526-91d7-614b3cd91082\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.064379 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-trusted-ca\") pod \"0c526262-6ee4-4526-91d7-614b3cd91082\" (UID: \"0c526262-6ee4-4526-91d7-614b3cd91082\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.065773 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-utilities" (OuterVolumeSpecName: "utilities") pod "354400fa-dc6c-4435-b9cf-09b5e76a6ef2" (UID: "354400fa-dc6c-4435-b9cf-09b5e76a6ef2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.067923 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "0c526262-6ee4-4526-91d7-614b3cd91082" (UID: "0c526262-6ee4-4526-91d7-614b3cd91082"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.068610 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-utilities" (OuterVolumeSpecName: "utilities") pod "754ddb47-7891-4142-b52c-98a29ca40078" (UID: "754ddb47-7891-4142-b52c-98a29ca40078"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.068836 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" podStartSLOduration=2.068817312 podStartE2EDuration="2.068817312s" podCreationTimestamp="2026-01-27 12:32:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:32:30.063895116 +0000 UTC m=+377.300923346" watchObservedRunningTime="2026-01-27 12:32:30.068817312 +0000 UTC m=+377.305845522" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.165640 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-catalog-content\") pod \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\" (UID: \"354400fa-dc6c-4435-b9cf-09b5e76a6ef2\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.166245 4900 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.166288 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.166304 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.189290 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "354400fa-dc6c-4435-b9cf-09b5e76a6ef2" (UID: "354400fa-dc6c-4435-b9cf-09b5e76a6ef2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.193935 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-kube-api-access-rdhgd" (OuterVolumeSpecName: "kube-api-access-rdhgd") pod "354400fa-dc6c-4435-b9cf-09b5e76a6ef2" (UID: "354400fa-dc6c-4435-b9cf-09b5e76a6ef2"). InnerVolumeSpecName "kube-api-access-rdhgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.194007 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/754ddb47-7891-4142-b52c-98a29ca40078-kube-api-access-wn6nj" (OuterVolumeSpecName: "kube-api-access-wn6nj") pod "754ddb47-7891-4142-b52c-98a29ca40078" (UID: "754ddb47-7891-4142-b52c-98a29ca40078"). InnerVolumeSpecName "kube-api-access-wn6nj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.195599 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "754ddb47-7891-4142-b52c-98a29ca40078" (UID: "754ddb47-7891-4142-b52c-98a29ca40078"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.196335 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "0c526262-6ee4-4526-91d7-614b3cd91082" (UID: "0c526262-6ee4-4526-91d7-614b3cd91082"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.266815 4900 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0c526262-6ee4-4526-91d7-614b3cd91082-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.266861 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wn6nj\" (UniqueName: \"kubernetes.io/projected/754ddb47-7891-4142-b52c-98a29ca40078-kube-api-access-wn6nj\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.266877 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdhgd\" (UniqueName: \"kubernetes.io/projected/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-kube-api-access-rdhgd\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.266889 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/354400fa-dc6c-4435-b9cf-09b5e76a6ef2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.266902 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/754ddb47-7891-4142-b52c-98a29ca40078-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.290933 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c526262-6ee4-4526-91d7-614b3cd91082-kube-api-access-p9tvq" (OuterVolumeSpecName: "kube-api-access-p9tvq") pod "0c526262-6ee4-4526-91d7-614b3cd91082" (UID: "0c526262-6ee4-4526-91d7-614b3cd91082"). InnerVolumeSpecName "kube-api-access-p9tvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.367908 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9tvq\" (UniqueName: \"kubernetes.io/projected/0c526262-6ee4-4526-91d7-614b3cd91082-kube-api-access-p9tvq\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.385919 4900 scope.go:117] "RemoveContainer" containerID="0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.389141 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.410237 4900 scope.go:117] "RemoveContainer" containerID="d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.428655 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9s4cn" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.447234 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r6fg7"] Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.449521 4900 scope.go:117] "RemoveContainer" containerID="9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122" Jan 27 12:32:30 crc kubenswrapper[4900]: E0127 12:32:30.450003 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122\": container with ID starting with 9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122 not found: ID does not exist" containerID="9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.450040 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122"} err="failed to get container status \"9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122\": rpc error: code = NotFound desc = could not find container \"9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122\": container with ID starting with 9d15b259c868d21d94e9d5c0df72860c47cca5bba7bb4e538f29d50547884122 not found: ID does not exist" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.450079 4900 scope.go:117] "RemoveContainer" containerID="0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006" Jan 27 12:32:30 crc kubenswrapper[4900]: E0127 12:32:30.450373 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006\": container with ID starting with 0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006 not found: ID does not exist" containerID="0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.450405 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006"} err="failed to get container status \"0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006\": rpc error: code = NotFound desc = could not find container \"0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006\": container with ID starting with 0808077f381c53b0b7f5dffa7080fbbf6605ec74eb52a0cd0df163c135260006 not found: ID does not exist" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.450422 4900 scope.go:117] "RemoveContainer" containerID="d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678" Jan 27 12:32:30 crc kubenswrapper[4900]: E0127 12:32:30.450785 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678\": container with ID starting with d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678 not found: ID does not exist" containerID="d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.450831 4900 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678"} err="failed to get container status \"d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678\": rpc error: code = NotFound desc = could not find container \"d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678\": container with ID starting with d6e0a0af1af2784d671b0260deb0bd3553dba90d889d123de5312a7d45de2678 not found: ID does not exist" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.450849 4900 scope.go:117] "RemoveContainer" containerID="a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.451953 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-r6fg7"] Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.463163 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8kcp8"] Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.467165 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8kcp8"] Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.468950 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-utilities\") pod \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.468993 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-catalog-content\") pod \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.469070 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-catalog-content\") pod \"86867c21-fd52-44e7-85d2-87da48322397\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.469092 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spv6t\" (UniqueName: \"kubernetes.io/projected/93a3d7a3-dddf-49e3-be5b-5369108a5e13-kube-api-access-spv6t\") pod \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\" (UID: \"93a3d7a3-dddf-49e3-be5b-5369108a5e13\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.469117 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-utilities\") pod \"86867c21-fd52-44e7-85d2-87da48322397\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.469132 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkh6z\" (UniqueName: \"kubernetes.io/projected/86867c21-fd52-44e7-85d2-87da48322397-kube-api-access-rkh6z\") pod \"86867c21-fd52-44e7-85d2-87da48322397\" (UID: \"86867c21-fd52-44e7-85d2-87da48322397\") " Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.469800 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-utilities" (OuterVolumeSpecName: 
"utilities") pod "93a3d7a3-dddf-49e3-be5b-5369108a5e13" (UID: "93a3d7a3-dddf-49e3-be5b-5369108a5e13"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.472391 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-utilities" (OuterVolumeSpecName: "utilities") pod "86867c21-fd52-44e7-85d2-87da48322397" (UID: "86867c21-fd52-44e7-85d2-87da48322397"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.473669 4900 scope.go:117] "RemoveContainer" containerID="a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f" Jan 27 12:32:30 crc kubenswrapper[4900]: E0127 12:32:30.475554 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f\": container with ID starting with a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f not found: ID does not exist" containerID="a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.475645 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f"} err="failed to get container status \"a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f\": rpc error: code = NotFound desc = could not find container \"a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f\": container with ID starting with a4361e38b46e40aad517c1d78e85dfc2c2cf5a6639fc9abdfbda2cbc02a3860f not found: ID does not exist" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.475711 4900 scope.go:117] "RemoveContainer" containerID="e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.478508 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86867c21-fd52-44e7-85d2-87da48322397-kube-api-access-rkh6z" (OuterVolumeSpecName: "kube-api-access-rkh6z") pod "86867c21-fd52-44e7-85d2-87da48322397" (UID: "86867c21-fd52-44e7-85d2-87da48322397"). InnerVolumeSpecName "kube-api-access-rkh6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.488639 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93a3d7a3-dddf-49e3-be5b-5369108a5e13-kube-api-access-spv6t" (OuterVolumeSpecName: "kube-api-access-spv6t") pod "93a3d7a3-dddf-49e3-be5b-5369108a5e13" (UID: "93a3d7a3-dddf-49e3-be5b-5369108a5e13"). InnerVolumeSpecName "kube-api-access-spv6t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.492671 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" path="/var/lib/kubelet/pods/0c526262-6ee4-4526-91d7-614b3cd91082/volumes" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.499869 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" path="/var/lib/kubelet/pods/354400fa-dc6c-4435-b9cf-09b5e76a6ef2/volumes" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.507593 4900 scope.go:117] "RemoveContainer" containerID="902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.546108 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-grrcf"] Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.546199 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-grrcf"] Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.558555 4900 scope.go:117] "RemoveContainer" containerID="01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.560036 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "86867c21-fd52-44e7-85d2-87da48322397" (UID: "86867c21-fd52-44e7-85d2-87da48322397"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.570944 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.571395 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spv6t\" (UniqueName: \"kubernetes.io/projected/93a3d7a3-dddf-49e3-be5b-5369108a5e13-kube-api-access-spv6t\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.571430 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86867c21-fd52-44e7-85d2-87da48322397-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.571443 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkh6z\" (UniqueName: \"kubernetes.io/projected/86867c21-fd52-44e7-85d2-87da48322397-kube-api-access-rkh6z\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.571454 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.574856 4900 scope.go:117] "RemoveContainer" containerID="e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1" Jan 27 12:32:30 crc kubenswrapper[4900]: E0127 12:32:30.575888 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1\": container with ID starting with 
e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1 not found: ID does not exist" containerID="e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.575944 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1"} err="failed to get container status \"e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1\": rpc error: code = NotFound desc = could not find container \"e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1\": container with ID starting with e5fcd44b18666d4587e5663923891df7d0036bbca45f909b308f25954c2a32a1 not found: ID does not exist" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.576011 4900 scope.go:117] "RemoveContainer" containerID="902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e" Jan 27 12:32:30 crc kubenswrapper[4900]: E0127 12:32:30.576452 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e\": container with ID starting with 902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e not found: ID does not exist" containerID="902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.576500 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e"} err="failed to get container status \"902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e\": rpc error: code = NotFound desc = could not find container \"902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e\": container with ID starting with 902878fa2d3acce0e9f7407e81451b1fdb2e3187dcec72548cbdc07571bd683e not found: ID does not exist" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.576537 4900 scope.go:117] "RemoveContainer" containerID="01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5" Jan 27 12:32:30 crc kubenswrapper[4900]: E0127 12:32:30.576776 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5\": container with ID starting with 01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5 not found: ID does not exist" containerID="01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.576814 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5"} err="failed to get container status \"01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5\": rpc error: code = NotFound desc = could not find container \"01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5\": container with ID starting with 01ca599c69923934750353ba3076a6c802484569319254cad2c3f950e449d0f5 not found: ID does not exist" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.646727 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93a3d7a3-dddf-49e3-be5b-5369108a5e13" (UID: 
"93a3d7a3-dddf-49e3-be5b-5369108a5e13"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:32:30 crc kubenswrapper[4900]: I0127 12:32:30.672715 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93a3d7a3-dddf-49e3-be5b-5369108a5e13-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.074046 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jvcrt" event={"ID":"86867c21-fd52-44e7-85d2-87da48322397","Type":"ContainerDied","Data":"bd512cb49e94dd7f0a85ffa4d0d1788262c9a05746796b60a472be8302717d9e"} Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.074133 4900 scope.go:117] "RemoveContainer" containerID="a5338c72764cd468a9a4699ae43a19077c903f169068a51d6494b6f9cb03da8e" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.074239 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jvcrt" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.079716 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9s4cn" event={"ID":"93a3d7a3-dddf-49e3-be5b-5369108a5e13","Type":"ContainerDied","Data":"b70ad1b84c6875c5d92740e8b6301239bbdc39a10c5e1a62a0348c8428a9d8ac"} Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.079818 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9s4cn" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.083287 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.098730 4900 scope.go:117] "RemoveContainer" containerID="d1ac6f9cb085f6dda71bea07763408defa033a038093cb9cb9fc11192b69f77b" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.154779 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jvcrt"] Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.158651 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jvcrt"] Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162153 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-k2gzb"] Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162468 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="extract-utilities" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162489 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="extract-utilities" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162504 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="754ddb47-7891-4142-b52c-98a29ca40078" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162511 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="754ddb47-7891-4142-b52c-98a29ca40078" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162522 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86867c21-fd52-44e7-85d2-87da48322397" containerName="extract-utilities" Jan 27 12:32:31 crc 
kubenswrapper[4900]: I0127 12:32:31.162531 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="86867c21-fd52-44e7-85d2-87da48322397" containerName="extract-utilities" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162542 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86867c21-fd52-44e7-85d2-87da48322397" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162550 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="86867c21-fd52-44e7-85d2-87da48322397" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162567 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162576 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162585 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="754ddb47-7891-4142-b52c-98a29ca40078" containerName="extract-utilities" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162592 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="754ddb47-7891-4142-b52c-98a29ca40078" containerName="extract-utilities" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162606 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162614 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162623 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162630 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162638 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="extract-utilities" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162645 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="extract-utilities" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162654 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="extract-content" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162662 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="extract-content" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162674 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="754ddb47-7891-4142-b52c-98a29ca40078" containerName="extract-content" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162681 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="754ddb47-7891-4142-b52c-98a29ca40078" containerName="extract-content" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162694 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86867c21-fd52-44e7-85d2-87da48322397" 
containerName="extract-content" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162701 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="86867c21-fd52-44e7-85d2-87da48322397" containerName="extract-content" Jan 27 12:32:31 crc kubenswrapper[4900]: E0127 12:32:31.162710 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="extract-content" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162717 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="extract-content" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162841 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c526262-6ee4-4526-91d7-614b3cd91082" containerName="marketplace-operator" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162860 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162871 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="86867c21-fd52-44e7-85d2-87da48322397" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162883 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="754ddb47-7891-4142-b52c-98a29ca40078" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.162891 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="354400fa-dc6c-4435-b9cf-09b5e76a6ef2" containerName="registry-server" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.163869 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.167110 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.176516 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k2gzb"] Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.182461 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50203e3a-7094-487f-9d3b-a9467363dfaf-catalog-content\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.182652 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2pjf\" (UniqueName: \"kubernetes.io/projected/50203e3a-7094-487f-9d3b-a9467363dfaf-kube-api-access-z2pjf\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.182735 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50203e3a-7094-487f-9d3b-a9467363dfaf-utilities\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.183834 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-9s4cn"] Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.186822 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9s4cn"] Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.284551 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50203e3a-7094-487f-9d3b-a9467363dfaf-catalog-content\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.284990 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2pjf\" (UniqueName: \"kubernetes.io/projected/50203e3a-7094-487f-9d3b-a9467363dfaf-kube-api-access-z2pjf\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.285606 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50203e3a-7094-487f-9d3b-a9467363dfaf-utilities\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.285421 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50203e3a-7094-487f-9d3b-a9467363dfaf-catalog-content\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.286193 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50203e3a-7094-487f-9d3b-a9467363dfaf-utilities\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.303152 4900 scope.go:117] "RemoveContainer" containerID="c4b18bcd28b16e9f8937d1f037c6af60490d1ba25dcc5eda168248457a2e42be" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.307672 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2pjf\" (UniqueName: \"kubernetes.io/projected/50203e3a-7094-487f-9d3b-a9467363dfaf-kube-api-access-z2pjf\") pod \"redhat-marketplace-k2gzb\" (UID: \"50203e3a-7094-487f-9d3b-a9467363dfaf\") " pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.331598 4900 scope.go:117] "RemoveContainer" containerID="4628df3d3bc34de97560d8cb0b2fce0302a9a745a901b04ff425f5f7357864ff" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.346411 4900 scope.go:117] "RemoveContainer" containerID="7cf8cc2e0939176c9937ce00b46e9a1c83d4e636719df6fdb77d87e33709bedc" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.363878 4900 scope.go:117] "RemoveContainer" containerID="af4dfb7e99827cc962992297dfd9b862278233f31bcabed00a1f6164989f589f" Jan 27 12:32:31 crc kubenswrapper[4900]: I0127 12:32:31.480456 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:32 crc kubenswrapper[4900]: I0127 12:32:31.860274 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k2gzb"] Jan 27 12:32:32 crc kubenswrapper[4900]: W0127 12:32:31.864132 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50203e3a_7094_487f_9d3b_a9467363dfaf.slice/crio-1633fb5a180a4dc045782206248f50a6ccac1a7509a527255084dba93073181d WatchSource:0}: Error finding container 1633fb5a180a4dc045782206248f50a6ccac1a7509a527255084dba93073181d: Status 404 returned error can't find the container with id 1633fb5a180a4dc045782206248f50a6ccac1a7509a527255084dba93073181d Jan 27 12:32:32 crc kubenswrapper[4900]: I0127 12:32:32.089751 4900 generic.go:334] "Generic (PLEG): container finished" podID="50203e3a-7094-487f-9d3b-a9467363dfaf" containerID="373814efa68b8b7f808f598a5cb0419e402df6a08477159b3c3ac1d7ec5191fe" exitCode=0 Jan 27 12:32:32 crc kubenswrapper[4900]: I0127 12:32:32.089847 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k2gzb" event={"ID":"50203e3a-7094-487f-9d3b-a9467363dfaf","Type":"ContainerDied","Data":"373814efa68b8b7f808f598a5cb0419e402df6a08477159b3c3ac1d7ec5191fe"} Jan 27 12:32:32 crc kubenswrapper[4900]: I0127 12:32:32.089898 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k2gzb" event={"ID":"50203e3a-7094-487f-9d3b-a9467363dfaf","Type":"ContainerStarted","Data":"1633fb5a180a4dc045782206248f50a6ccac1a7509a527255084dba93073181d"} Jan 27 12:32:32 crc kubenswrapper[4900]: I0127 12:32:32.489912 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="754ddb47-7891-4142-b52c-98a29ca40078" path="/var/lib/kubelet/pods/754ddb47-7891-4142-b52c-98a29ca40078/volumes" Jan 27 12:32:32 crc kubenswrapper[4900]: I0127 12:32:32.490713 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86867c21-fd52-44e7-85d2-87da48322397" path="/var/lib/kubelet/pods/86867c21-fd52-44e7-85d2-87da48322397/volumes" Jan 27 12:32:32 crc kubenswrapper[4900]: I0127 12:32:32.491379 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93a3d7a3-dddf-49e3-be5b-5369108a5e13" path="/var/lib/kubelet/pods/93a3d7a3-dddf-49e3-be5b-5369108a5e13/volumes" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.099904 4900 generic.go:334] "Generic (PLEG): container finished" podID="50203e3a-7094-487f-9d3b-a9467363dfaf" containerID="d1289571b994e3454e920e1c80bb0174d3fcea50423465d6eb3c72272c47d3e0" exitCode=0 Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.101388 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k2gzb" event={"ID":"50203e3a-7094-487f-9d3b-a9467363dfaf","Type":"ContainerDied","Data":"d1289571b994e3454e920e1c80bb0174d3fcea50423465d6eb3c72272c47d3e0"} Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.327764 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8kh64"] Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.328858 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.331513 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.345661 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8kh64"] Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.412751 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl4mc\" (UniqueName: \"kubernetes.io/projected/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-kube-api-access-sl4mc\") pod \"certified-operators-8kh64\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.412853 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-utilities\") pod \"certified-operators-8kh64\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.412885 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-catalog-content\") pod \"certified-operators-8kh64\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.513897 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl4mc\" (UniqueName: \"kubernetes.io/projected/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-kube-api-access-sl4mc\") pod \"certified-operators-8kh64\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.514073 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-utilities\") pod \"certified-operators-8kh64\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.514111 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-catalog-content\") pod \"certified-operators-8kh64\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.515013 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-utilities\") pod \"certified-operators-8kh64\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.515131 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-catalog-content\") pod \"certified-operators-8kh64\" (UID: 
\"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.531944 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zkzmp"] Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.533186 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.538049 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.548532 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl4mc\" (UniqueName: \"kubernetes.io/projected/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-kube-api-access-sl4mc\") pod \"certified-operators-8kh64\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.551651 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zkzmp"] Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.644045 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.716237 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlsn4\" (UniqueName: \"kubernetes.io/projected/008df9f5-f660-4c50-b9d1-adf18fa073d1-kube-api-access-vlsn4\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.716299 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/008df9f5-f660-4c50-b9d1-adf18fa073d1-catalog-content\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.716351 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/008df9f5-f660-4c50-b9d1-adf18fa073d1-utilities\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.817505 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/008df9f5-f660-4c50-b9d1-adf18fa073d1-catalog-content\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.817615 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/008df9f5-f660-4c50-b9d1-adf18fa073d1-utilities\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.817665 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-vlsn4\" (UniqueName: \"kubernetes.io/projected/008df9f5-f660-4c50-b9d1-adf18fa073d1-kube-api-access-vlsn4\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.819014 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/008df9f5-f660-4c50-b9d1-adf18fa073d1-utilities\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.819207 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/008df9f5-f660-4c50-b9d1-adf18fa073d1-catalog-content\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.844697 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlsn4\" (UniqueName: \"kubernetes.io/projected/008df9f5-f660-4c50-b9d1-adf18fa073d1-kube-api-access-vlsn4\") pod \"community-operators-zkzmp\" (UID: \"008df9f5-f660-4c50-b9d1-adf18fa073d1\") " pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:33 crc kubenswrapper[4900]: I0127 12:32:33.867213 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:34 crc kubenswrapper[4900]: I0127 12:32:34.045158 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8kh64"] Jan 27 12:32:34 crc kubenswrapper[4900]: I0127 12:32:34.106931 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8kh64" event={"ID":"cd975c7e-d2bf-43ac-bc97-e8efa9e00611","Type":"ContainerStarted","Data":"8b62554df23bbd0b4e951555fd98e876e2baeec32e0dc50ba6a57a772dfa5473"} Jan 27 12:32:34 crc kubenswrapper[4900]: I0127 12:32:34.264928 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zkzmp"] Jan 27 12:32:34 crc kubenswrapper[4900]: W0127 12:32:34.268959 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod008df9f5_f660_4c50_b9d1_adf18fa073d1.slice/crio-ac78460c2d03d2e47c8b6a7b6e75d08661eba63c34ea4942c03aa102a62266f5 WatchSource:0}: Error finding container ac78460c2d03d2e47c8b6a7b6e75d08661eba63c34ea4942c03aa102a62266f5: Status 404 returned error can't find the container with id ac78460c2d03d2e47c8b6a7b6e75d08661eba63c34ea4942c03aa102a62266f5 Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.115844 4900 generic.go:334] "Generic (PLEG): container finished" podID="008df9f5-f660-4c50-b9d1-adf18fa073d1" containerID="75321cfaaad9417ad1aca77a5463695c77c2cf9d6a9e92e5ea14d742e6a94aef" exitCode=0 Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.115965 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkzmp" event={"ID":"008df9f5-f660-4c50-b9d1-adf18fa073d1","Type":"ContainerDied","Data":"75321cfaaad9417ad1aca77a5463695c77c2cf9d6a9e92e5ea14d742e6a94aef"} Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.116235 4900 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkzmp" event={"ID":"008df9f5-f660-4c50-b9d1-adf18fa073d1","Type":"ContainerStarted","Data":"ac78460c2d03d2e47c8b6a7b6e75d08661eba63c34ea4942c03aa102a62266f5"} Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.118158 4900 generic.go:334] "Generic (PLEG): container finished" podID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerID="774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4" exitCode=0 Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.118200 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8kh64" event={"ID":"cd975c7e-d2bf-43ac-bc97-e8efa9e00611","Type":"ContainerDied","Data":"774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4"} Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.121321 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k2gzb" event={"ID":"50203e3a-7094-487f-9d3b-a9467363dfaf","Type":"ContainerStarted","Data":"a1c28a73713c934c382ce62db69af0815ac5bdd8be7f3f39c7a29eb01e2363ca"} Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.177514 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-k2gzb" podStartSLOduration=2.000750039 podStartE2EDuration="4.177485235s" podCreationTimestamp="2026-01-27 12:32:31 +0000 UTC" firstStartedPulling="2026-01-27 12:32:32.090862758 +0000 UTC m=+379.327890968" lastFinishedPulling="2026-01-27 12:32:34.267597964 +0000 UTC m=+381.504626164" observedRunningTime="2026-01-27 12:32:35.174817946 +0000 UTC m=+382.411846156" watchObservedRunningTime="2026-01-27 12:32:35.177485235 +0000 UTC m=+382.414513445" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.727205 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vc5r8"] Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.728276 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.730566 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.737950 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vc5r8"] Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.749857 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c42ffb69-dba9-4ce2-8fe6-a5581776859f-catalog-content\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.749954 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjpxr\" (UniqueName: \"kubernetes.io/projected/c42ffb69-dba9-4ce2-8fe6-a5581776859f-kube-api-access-zjpxr\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.750045 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c42ffb69-dba9-4ce2-8fe6-a5581776859f-utilities\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.850696 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c42ffb69-dba9-4ce2-8fe6-a5581776859f-catalog-content\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.850857 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjpxr\" (UniqueName: \"kubernetes.io/projected/c42ffb69-dba9-4ce2-8fe6-a5581776859f-kube-api-access-zjpxr\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.850894 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c42ffb69-dba9-4ce2-8fe6-a5581776859f-utilities\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.851474 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c42ffb69-dba9-4ce2-8fe6-a5581776859f-utilities\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.851765 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c42ffb69-dba9-4ce2-8fe6-a5581776859f-catalog-content\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " 
pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:35 crc kubenswrapper[4900]: I0127 12:32:35.873542 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjpxr\" (UniqueName: \"kubernetes.io/projected/c42ffb69-dba9-4ce2-8fe6-a5581776859f-kube-api-access-zjpxr\") pod \"redhat-operators-vc5r8\" (UID: \"c42ffb69-dba9-4ce2-8fe6-a5581776859f\") " pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:36 crc kubenswrapper[4900]: I0127 12:32:36.045083 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:36 crc kubenswrapper[4900]: I0127 12:32:36.602835 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vc5r8"] Jan 27 12:32:36 crc kubenswrapper[4900]: W0127 12:32:36.609400 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc42ffb69_dba9_4ce2_8fe6_a5581776859f.slice/crio-b8f73ba6966389e60ace65e6a69e8506ebef973182b9f3e87ac6bdfef0c4cdd3 WatchSource:0}: Error finding container b8f73ba6966389e60ace65e6a69e8506ebef973182b9f3e87ac6bdfef0c4cdd3: Status 404 returned error can't find the container with id b8f73ba6966389e60ace65e6a69e8506ebef973182b9f3e87ac6bdfef0c4cdd3 Jan 27 12:32:37 crc kubenswrapper[4900]: I0127 12:32:37.135571 4900 generic.go:334] "Generic (PLEG): container finished" podID="008df9f5-f660-4c50-b9d1-adf18fa073d1" containerID="9054ab975a8d2ba5c1925ff9be1bf592423822b816fdf1c4150b903c00fc2bf0" exitCode=0 Jan 27 12:32:37 crc kubenswrapper[4900]: I0127 12:32:37.135701 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zkzmp" event={"ID":"008df9f5-f660-4c50-b9d1-adf18fa073d1","Type":"ContainerDied","Data":"9054ab975a8d2ba5c1925ff9be1bf592423822b816fdf1c4150b903c00fc2bf0"} Jan 27 12:32:37 crc kubenswrapper[4900]: I0127 12:32:37.138129 4900 generic.go:334] "Generic (PLEG): container finished" podID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerID="225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477" exitCode=0 Jan 27 12:32:37 crc kubenswrapper[4900]: I0127 12:32:37.138217 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8kh64" event={"ID":"cd975c7e-d2bf-43ac-bc97-e8efa9e00611","Type":"ContainerDied","Data":"225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477"} Jan 27 12:32:37 crc kubenswrapper[4900]: I0127 12:32:37.142474 4900 generic.go:334] "Generic (PLEG): container finished" podID="c42ffb69-dba9-4ce2-8fe6-a5581776859f" containerID="1f571c9de344d4f9fb7c7478473d790f816171c77aa06e0e1a30e0eb51570abe" exitCode=0 Jan 27 12:32:37 crc kubenswrapper[4900]: I0127 12:32:37.142507 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vc5r8" event={"ID":"c42ffb69-dba9-4ce2-8fe6-a5581776859f","Type":"ContainerDied","Data":"1f571c9de344d4f9fb7c7478473d790f816171c77aa06e0e1a30e0eb51570abe"} Jan 27 12:32:37 crc kubenswrapper[4900]: I0127 12:32:37.142527 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vc5r8" event={"ID":"c42ffb69-dba9-4ce2-8fe6-a5581776859f","Type":"ContainerStarted","Data":"b8f73ba6966389e60ace65e6a69e8506ebef973182b9f3e87ac6bdfef0c4cdd3"} Jan 27 12:32:40 crc kubenswrapper[4900]: I0127 12:32:40.162002 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-zkzmp" event={"ID":"008df9f5-f660-4c50-b9d1-adf18fa073d1","Type":"ContainerStarted","Data":"ce669fe8388e134a5f4065dd7d03b506938a2e4add32a54ea330eb58f8b2a6df"} Jan 27 12:32:40 crc kubenswrapper[4900]: I0127 12:32:40.183879 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zkzmp" podStartSLOduration=2.8428587910000003 podStartE2EDuration="7.183854443s" podCreationTimestamp="2026-01-27 12:32:33 +0000 UTC" firstStartedPulling="2026-01-27 12:32:35.11740545 +0000 UTC m=+382.354433660" lastFinishedPulling="2026-01-27 12:32:39.458401102 +0000 UTC m=+386.695429312" observedRunningTime="2026-01-27 12:32:40.183308627 +0000 UTC m=+387.420336847" watchObservedRunningTime="2026-01-27 12:32:40.183854443 +0000 UTC m=+387.420882653" Jan 27 12:32:41 crc kubenswrapper[4900]: I0127 12:32:41.170084 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vc5r8" event={"ID":"c42ffb69-dba9-4ce2-8fe6-a5581776859f","Type":"ContainerStarted","Data":"dab9ea41932b18af3b631040a15b3e44e8707c27ffb564e0ac35771e8ad76c8a"} Jan 27 12:32:41 crc kubenswrapper[4900]: I0127 12:32:41.173980 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8kh64" event={"ID":"cd975c7e-d2bf-43ac-bc97-e8efa9e00611","Type":"ContainerStarted","Data":"8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0"} Jan 27 12:32:41 crc kubenswrapper[4900]: I0127 12:32:41.220881 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8kh64" podStartSLOduration=2.860858746 podStartE2EDuration="8.220858013s" podCreationTimestamp="2026-01-27 12:32:33 +0000 UTC" firstStartedPulling="2026-01-27 12:32:35.119605565 +0000 UTC m=+382.356633765" lastFinishedPulling="2026-01-27 12:32:40.479604822 +0000 UTC m=+387.716633032" observedRunningTime="2026-01-27 12:32:41.217845133 +0000 UTC m=+388.454873343" watchObservedRunningTime="2026-01-27 12:32:41.220858013 +0000 UTC m=+388.457886223" Jan 27 12:32:41 crc kubenswrapper[4900]: I0127 12:32:41.480756 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:41 crc kubenswrapper[4900]: I0127 12:32:41.480807 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:41 crc kubenswrapper[4900]: I0127 12:32:41.539890 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:42 crc kubenswrapper[4900]: I0127 12:32:42.181000 4900 generic.go:334] "Generic (PLEG): container finished" podID="c42ffb69-dba9-4ce2-8fe6-a5581776859f" containerID="dab9ea41932b18af3b631040a15b3e44e8707c27ffb564e0ac35771e8ad76c8a" exitCode=0 Jan 27 12:32:42 crc kubenswrapper[4900]: I0127 12:32:42.181119 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vc5r8" event={"ID":"c42ffb69-dba9-4ce2-8fe6-a5581776859f","Type":"ContainerDied","Data":"dab9ea41932b18af3b631040a15b3e44e8707c27ffb564e0ac35771e8ad76c8a"} Jan 27 12:32:42 crc kubenswrapper[4900]: I0127 12:32:42.184049 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 12:32:42 crc kubenswrapper[4900]: I0127 12:32:42.237944 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/redhat-marketplace-k2gzb" Jan 27 12:32:43 crc kubenswrapper[4900]: I0127 12:32:43.645242 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:43 crc kubenswrapper[4900]: I0127 12:32:43.646561 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:43 crc kubenswrapper[4900]: I0127 12:32:43.687511 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:43 crc kubenswrapper[4900]: I0127 12:32:43.868492 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:43 crc kubenswrapper[4900]: I0127 12:32:43.868542 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:43 crc kubenswrapper[4900]: I0127 12:32:43.911858 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:44 crc kubenswrapper[4900]: I0127 12:32:44.198088 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vc5r8" event={"ID":"c42ffb69-dba9-4ce2-8fe6-a5581776859f","Type":"ContainerStarted","Data":"ad960c495947d39725977623d1a15296d45c5f2f89bcb35d5cfce17f07a5fcf8"} Jan 27 12:32:44 crc kubenswrapper[4900]: I0127 12:32:44.227754 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vc5r8" podStartSLOduration=2.898951709 podStartE2EDuration="9.227720446s" podCreationTimestamp="2026-01-27 12:32:35 +0000 UTC" firstStartedPulling="2026-01-27 12:32:37.144635735 +0000 UTC m=+384.381663945" lastFinishedPulling="2026-01-27 12:32:43.473404472 +0000 UTC m=+390.710432682" observedRunningTime="2026-01-27 12:32:44.22418334 +0000 UTC m=+391.461211550" watchObservedRunningTime="2026-01-27 12:32:44.227720446 +0000 UTC m=+391.464748656" Jan 27 12:32:44 crc kubenswrapper[4900]: I0127 12:32:44.246976 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zkzmp" Jan 27 12:32:46 crc kubenswrapper[4900]: I0127 12:32:46.045763 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:46 crc kubenswrapper[4900]: I0127 12:32:46.046134 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:47 crc kubenswrapper[4900]: I0127 12:32:47.087566 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vc5r8" podUID="c42ffb69-dba9-4ce2-8fe6-a5581776859f" containerName="registry-server" probeResult="failure" output=< Jan 27 12:32:47 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:32:47 crc kubenswrapper[4900]: > Jan 27 12:32:52 crc kubenswrapper[4900]: I0127 12:32:52.372430 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:32:52 crc kubenswrapper[4900]: I0127 12:32:52.372962 
4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:32:53 crc kubenswrapper[4900]: I0127 12:32:53.693868 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8kh64" Jan 27 12:32:56 crc kubenswrapper[4900]: I0127 12:32:56.094856 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:32:56 crc kubenswrapper[4900]: I0127 12:32:56.178548 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vc5r8" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.498896 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp"] Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.500182 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.504357 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.504383 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.504417 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.504617 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.505504 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.513113 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp"] Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.645080 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.645197 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfn9f\" (UniqueName: \"kubernetes.io/projected/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-kube-api-access-vfn9f\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.645298 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.746818 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.746924 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.746975 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfn9f\" (UniqueName: \"kubernetes.io/projected/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-kube-api-access-vfn9f\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.748177 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.753701 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.772122 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfn9f\" (UniqueName: \"kubernetes.io/projected/20e90476-8c9b-49a1-88c5-6cbf89c47d6f-kube-api-access-vfn9f\") pod \"cluster-monitoring-operator-6d5b84845-wcxbp\" (UID: \"20e90476-8c9b-49a1-88c5-6cbf89c47d6f\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:01 crc kubenswrapper[4900]: I0127 12:33:01.819370 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" Jan 27 12:33:02 crc kubenswrapper[4900]: I0127 12:33:02.264438 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp"] Jan 27 12:33:02 crc kubenswrapper[4900]: W0127 12:33:02.270311 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20e90476_8c9b_49a1_88c5_6cbf89c47d6f.slice/crio-5f96bb2378bfc4e814144652cea6eddd00b7fc16cf97ccc7a3fe98a6497e3b4d WatchSource:0}: Error finding container 5f96bb2378bfc4e814144652cea6eddd00b7fc16cf97ccc7a3fe98a6497e3b4d: Status 404 returned error can't find the container with id 5f96bb2378bfc4e814144652cea6eddd00b7fc16cf97ccc7a3fe98a6497e3b4d Jan 27 12:33:02 crc kubenswrapper[4900]: I0127 12:33:02.323232 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" event={"ID":"20e90476-8c9b-49a1-88c5-6cbf89c47d6f","Type":"ContainerStarted","Data":"5f96bb2378bfc4e814144652cea6eddd00b7fc16cf97ccc7a3fe98a6497e3b4d"} Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.350296 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" event={"ID":"20e90476-8c9b-49a1-88c5-6cbf89c47d6f","Type":"ContainerStarted","Data":"4568847a25d3e10df7c7147045bd13298e5d603ebf34a43b9dceeb3aad713d6d"} Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.373185 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-wcxbp" podStartSLOduration=1.803024237 podStartE2EDuration="5.373140512s" podCreationTimestamp="2026-01-27 12:33:01 +0000 UTC" firstStartedPulling="2026-01-27 12:33:02.273221785 +0000 UTC m=+409.510249995" lastFinishedPulling="2026-01-27 12:33:05.84333806 +0000 UTC m=+413.080366270" observedRunningTime="2026-01-27 12:33:06.368741741 +0000 UTC m=+413.605769961" watchObservedRunningTime="2026-01-27 12:33:06.373140512 +0000 UTC m=+413.610168722" Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.523420 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l"] Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.524560 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.526774 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-xpvdx" Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.527795 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.535912 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l"] Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.622512 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-rds4l\" (UID: \"0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 12:33:06 crc kubenswrapper[4900]: I0127 12:33:06.724859 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-rds4l\" (UID: \"0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 12:33:06 crc kubenswrapper[4900]: E0127 12:33:06.725030 4900 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Jan 27 12:33:06 crc kubenswrapper[4900]: E0127 12:33:06.725127 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677-tls-certificates podName:0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677 nodeName:}" failed. No retries permitted until 2026-01-27 12:33:07.225100691 +0000 UTC m=+414.462128901 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-rds4l" (UID: "0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677") : secret "prometheus-operator-admission-webhook-tls" not found Jan 27 12:33:07 crc kubenswrapper[4900]: I0127 12:33:07.232769 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-rds4l\" (UID: \"0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 12:33:07 crc kubenswrapper[4900]: I0127 12:33:07.241742 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-rds4l\" (UID: \"0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 12:33:07 crc kubenswrapper[4900]: I0127 12:33:07.441091 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 12:33:07 crc kubenswrapper[4900]: I0127 12:33:07.831124 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l"] Jan 27 12:33:08 crc kubenswrapper[4900]: I0127 12:33:08.363989 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" event={"ID":"0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677","Type":"ContainerStarted","Data":"d111298c0284f8eb8750c40b9acd8bc5fd1e90dd09f9873be6d322bc0cf8af59"} Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.381013 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" event={"ID":"0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677","Type":"ContainerStarted","Data":"7a6d7c54d44701c25092d982003a1fcce8badc067d63a3f8fbdce05e3476b742"} Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.381401 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.388226 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.396622 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podStartSLOduration=2.404703416 podStartE2EDuration="5.39659397s" podCreationTimestamp="2026-01-27 12:33:06 +0000 UTC" firstStartedPulling="2026-01-27 12:33:07.839236007 +0000 UTC m=+415.076264217" lastFinishedPulling="2026-01-27 12:33:10.831126561 +0000 UTC m=+418.068154771" observedRunningTime="2026-01-27 12:33:11.395818447 +0000 UTC m=+418.632846677" watchObservedRunningTime="2026-01-27 12:33:11.39659397 +0000 UTC m=+418.633622180" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.609358 4900 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-monitoring/prometheus-operator-db54df47d-7qw9t"] Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.610438 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.613066 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.613153 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.613605 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-pjmwc" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.614236 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.627082 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-7qw9t"] Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.704445 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/1387cbcd-5c83-4410-9635-c7a397574a47-metrics-client-ca\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.704507 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/1387cbcd-5c83-4410-9635-c7a397574a47-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.704546 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvcbw\" (UniqueName: \"kubernetes.io/projected/1387cbcd-5c83-4410-9635-c7a397574a47-kube-api-access-pvcbw\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.704580 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/1387cbcd-5c83-4410-9635-c7a397574a47-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.806318 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/1387cbcd-5c83-4410-9635-c7a397574a47-metrics-client-ca\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.806410 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/1387cbcd-5c83-4410-9635-c7a397574a47-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.806468 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvcbw\" (UniqueName: \"kubernetes.io/projected/1387cbcd-5c83-4410-9635-c7a397574a47-kube-api-access-pvcbw\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.806514 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/1387cbcd-5c83-4410-9635-c7a397574a47-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.808812 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/1387cbcd-5c83-4410-9635-c7a397574a47-metrics-client-ca\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.830898 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/1387cbcd-5c83-4410-9635-c7a397574a47-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.832586 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/1387cbcd-5c83-4410-9635-c7a397574a47-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.833532 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvcbw\" (UniqueName: \"kubernetes.io/projected/1387cbcd-5c83-4410-9635-c7a397574a47-kube-api-access-pvcbw\") pod \"prometheus-operator-db54df47d-7qw9t\" (UID: \"1387cbcd-5c83-4410-9635-c7a397574a47\") " pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:11 crc kubenswrapper[4900]: I0127 12:33:11.926436 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" Jan 27 12:33:12 crc kubenswrapper[4900]: I0127 12:33:12.322154 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-7qw9t"] Jan 27 12:33:12 crc kubenswrapper[4900]: I0127 12:33:12.389458 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" event={"ID":"1387cbcd-5c83-4410-9635-c7a397574a47","Type":"ContainerStarted","Data":"ec4bebe5f98e32b32315c605ccd7674299c4f5f2bcc42a36d267d5a013c6e816"} Jan 27 12:33:15 crc kubenswrapper[4900]: I0127 12:33:15.415866 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" event={"ID":"1387cbcd-5c83-4410-9635-c7a397574a47","Type":"ContainerStarted","Data":"e0ce483caf98939ae0ed7d3e729ce2f172ed365e34279de7588e8ad350167d93"} Jan 27 12:33:16 crc kubenswrapper[4900]: I0127 12:33:16.440236 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" event={"ID":"1387cbcd-5c83-4410-9635-c7a397574a47","Type":"ContainerStarted","Data":"ca2b2f6015776d65803455ab55d0122fa8f68355049548a3be83b042c75f0f78"} Jan 27 12:33:16 crc kubenswrapper[4900]: I0127 12:33:16.474218 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-7qw9t" podStartSLOduration=2.70678264 podStartE2EDuration="5.474181345s" podCreationTimestamp="2026-01-27 12:33:11 +0000 UTC" firstStartedPulling="2026-01-27 12:33:12.333435148 +0000 UTC m=+419.570463358" lastFinishedPulling="2026-01-27 12:33:15.100833853 +0000 UTC m=+422.337862063" observedRunningTime="2026-01-27 12:33:16.466718392 +0000 UTC m=+423.703746632" watchObservedRunningTime="2026-01-27 12:33:16.474181345 +0000 UTC m=+423.711209555" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.031973 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-kjnxp"] Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.034112 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.036453 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-h5mfk" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.036932 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.039187 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.044597 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx"] Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.048165 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.052747 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.052862 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.055220 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-ndw5l" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.069848 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45"] Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.071771 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.073970 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.074029 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.073970 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-wppvv" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.074495 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.074488 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx"] Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.104925 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45"] Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113428 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113488 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-wtmp\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113512 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28hvs\" (UniqueName: \"kubernetes.io/projected/2525752e-b31e-4e09-8376-6bdf714537f9-kube-api-access-28hvs\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " 
pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113534 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-sys\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113572 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-textfile\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113599 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113654 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113676 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/68d3baca-f774-4385-a9b8-355441784811-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113704 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-root\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113732 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-metrics-client-ca\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113753 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113779 
4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/2525752e-b31e-4e09-8376-6bdf714537f9-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113800 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113820 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-tls\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113840 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b88ll\" (UniqueName: \"kubernetes.io/projected/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-kube-api-access-b88ll\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113870 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz2b5\" (UniqueName: \"kubernetes.io/projected/68d3baca-f774-4385-a9b8-355441784811-kube-api-access-dz2b5\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113889 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/2525752e-b31e-4e09-8376-6bdf714537f9-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.113906 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215707 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215778 4900 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-tls\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215800 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b88ll\" (UniqueName: \"kubernetes.io/projected/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-kube-api-access-b88ll\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215838 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz2b5\" (UniqueName: \"kubernetes.io/projected/68d3baca-f774-4385-a9b8-355441784811-kube-api-access-dz2b5\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215859 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/2525752e-b31e-4e09-8376-6bdf714537f9-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215878 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215948 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215967 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-wtmp\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.215991 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28hvs\" (UniqueName: \"kubernetes.io/projected/2525752e-b31e-4e09-8376-6bdf714537f9-kube-api-access-28hvs\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216012 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: 
\"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-sys\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216039 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-textfile\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: E0127 12:33:18.216026 4900 secret.go:188] Couldn't get secret openshift-monitoring/kube-state-metrics-tls: secret "kube-state-metrics-tls" not found Jan 27 12:33:18 crc kubenswrapper[4900]: E0127 12:33:18.216215 4900 secret.go:188] Couldn't get secret openshift-monitoring/openshift-state-metrics-tls: secret "openshift-state-metrics-tls" not found Jan 27 12:33:18 crc kubenswrapper[4900]: E0127 12:33:18.216239 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-tls podName:2525752e-b31e-4e09-8376-6bdf714537f9 nodeName:}" failed. No retries permitted until 2026-01-27 12:33:18.716194375 +0000 UTC m=+425.953222775 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-state-metrics-tls" (UniqueName: "kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-tls") pod "kube-state-metrics-777cb5bd5d-r5q45" (UID: "2525752e-b31e-4e09-8376-6bdf714537f9") : secret "kube-state-metrics-tls" not found Jan 27 12:33:18 crc kubenswrapper[4900]: E0127 12:33:18.216300 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-tls podName:68d3baca-f774-4385-a9b8-355441784811 nodeName:}" failed. No retries permitted until 2026-01-27 12:33:18.716272957 +0000 UTC m=+425.953301167 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "openshift-state-metrics-tls" (UniqueName: "kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-tls") pod "openshift-state-metrics-566fddb674-7j8xx" (UID: "68d3baca-f774-4385-a9b8-355441784811") : secret "openshift-state-metrics-tls" not found Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216079 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216547 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216593 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/68d3baca-f774-4385-a9b8-355441784811-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216624 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-root\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216680 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-metrics-client-ca\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216711 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216753 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/2525752e-b31e-4e09-8376-6bdf714537f9-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216812 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-sys\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") 
" pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216832 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-wtmp\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.216923 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-root\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.217907 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-metrics-client-ca\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.217913 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-textfile\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.218569 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.218684 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/68d3baca-f774-4385-a9b8-355441784811-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.218865 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/2525752e-b31e-4e09-8376-6bdf714537f9-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.218893 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/2525752e-b31e-4e09-8376-6bdf714537f9-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.229551 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: 
\"kubernetes.io/secret/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.229628 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.229879 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.237857 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-node-exporter-tls\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.238903 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz2b5\" (UniqueName: \"kubernetes.io/projected/68d3baca-f774-4385-a9b8-355441784811-kube-api-access-dz2b5\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.250242 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28hvs\" (UniqueName: \"kubernetes.io/projected/2525752e-b31e-4e09-8376-6bdf714537f9-kube-api-access-28hvs\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.253951 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b88ll\" (UniqueName: \"kubernetes.io/projected/3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3-kube-api-access-b88ll\") pod \"node-exporter-kjnxp\" (UID: \"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3\") " pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.354299 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-kjnxp" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.455519 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-kjnxp" event={"ID":"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3","Type":"ContainerStarted","Data":"ead3dc0dc6f38885c39c2074bb59e1f7b1f97141082866275c398b8463498044"} Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.727398 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.727518 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.734112 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/68d3baca-f774-4385-a9b8-355441784811-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-7j8xx\" (UID: \"68d3baca-f774-4385-a9b8-355441784811\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.748683 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/2525752e-b31e-4e09-8376-6bdf714537f9-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-r5q45\" (UID: \"2525752e-b31e-4e09-8376-6bdf714537f9\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.967635 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" Jan 27 12:33:18 crc kubenswrapper[4900]: I0127 12:33:18.989036 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.110749 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.113409 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.116188 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.116361 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.116521 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.116616 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.117337 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-ngf6r" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.117639 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.121188 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.124482 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.128621 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132295 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132355 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132385 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-web-config\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132423 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-config-out\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132448 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-config-volume\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132472 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132523 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9hp8\" (UniqueName: \"kubernetes.io/projected/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-kube-api-access-g9hp8\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132585 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-tls-assets\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132608 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132627 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132659 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.132687 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.163419 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.233690 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: 
\"kubernetes.io/projected/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-tls-assets\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.233775 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.233810 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.233856 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.233904 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.233944 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.233976 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.234007 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-web-config\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.234032 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-config-out\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.234080 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-volume\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-config-volume\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.234130 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.234160 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9hp8\" (UniqueName: \"kubernetes.io/projected/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-kube-api-access-g9hp8\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.241012 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-tls-assets\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.244220 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.245138 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.250224 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.261220 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.273994 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-config-out\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.275307 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" 
(UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.276871 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-config-volume\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.278452 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.281497 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-web-config\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.297322 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9hp8\" (UniqueName: \"kubernetes.io/projected/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-kube-api-access-g9hp8\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.310831 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5fbc3e87-c231-4ae3-8e9b-53266dc5269d-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"5fbc3e87-c231-4ae3-8e9b-53266dc5269d\") " pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.433686 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.610375 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx"] Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.700038 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45"] Jan 27 12:33:19 crc kubenswrapper[4900]: I0127 12:33:19.916768 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.058201 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6"] Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.060488 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.062739 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.063852 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.064447 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.064610 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.064736 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-2ckjuetffellu" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.064815 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.064876 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-d2qzs" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.091219 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6"] Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.148406 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.148497 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.148547 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hl7h9\" (UniqueName: \"kubernetes.io/projected/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-kube-api-access-hl7h9\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.148592 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-metrics-client-ca\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.148618 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-grpc-tls\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.148702 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.148723 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-tls\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.148753 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.250726 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-metrics-client-ca\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.250826 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-grpc-tls\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.250907 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.250969 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-tls\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.251009 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" 
(UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.251114 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.251158 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.251191 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hl7h9\" (UniqueName: \"kubernetes.io/projected/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-kube-api-access-hl7h9\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.252221 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-metrics-client-ca\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.257864 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.258250 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.259411 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.261227 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: 
\"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.266615 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-grpc-tls\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.269274 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-secret-thanos-querier-tls\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.272031 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hl7h9\" (UniqueName: \"kubernetes.io/projected/a3f86dbd-dce0-4546-8668-e235cc7b5b2d-kube-api-access-hl7h9\") pod \"thanos-querier-6dfcd64f45-mmzj6\" (UID: \"a3f86dbd-dce0-4546-8668-e235cc7b5b2d\") " pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.380109 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.478411 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"5fbc3e87-c231-4ae3-8e9b-53266dc5269d","Type":"ContainerStarted","Data":"092bdc37a9dc291dc6a7d9f25167b6192fe17c3b05d853a0371d43c1f01510e9"} Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.491514 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" event={"ID":"2525752e-b31e-4e09-8376-6bdf714537f9","Type":"ContainerStarted","Data":"587fffdf8dc9f76ce4de39b7adc7bbee6ca4eb7b09c99b738e00b6b6c00b7b9d"} Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.491551 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" event={"ID":"68d3baca-f774-4385-a9b8-355441784811","Type":"ContainerStarted","Data":"347cf517cf037a904e0872f33822440051f70e9dbb2a64f3fb188b327a32adf9"} Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.491564 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" event={"ID":"68d3baca-f774-4385-a9b8-355441784811","Type":"ContainerStarted","Data":"a0336a64232e147e76d1ec46b3e7b816ece0c4df461ff903d1f55d151b862b0d"} Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.491573 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" event={"ID":"68d3baca-f774-4385-a9b8-355441784811","Type":"ContainerStarted","Data":"9f7fed1e65a707f77bff948160800ec9efca8b491baecd38bac30173a7cff956"} Jan 27 12:33:20 crc kubenswrapper[4900]: I0127 12:33:20.827992 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6"] Jan 27 12:33:20 crc 
kubenswrapper[4900]: W0127 12:33:20.844264 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3f86dbd_dce0_4546_8668_e235cc7b5b2d.slice/crio-77c1b33938a6400b64f4d3bcb35cf1c2af1511d673e5fb5f30e2308fc706e979 WatchSource:0}: Error finding container 77c1b33938a6400b64f4d3bcb35cf1c2af1511d673e5fb5f30e2308fc706e979: Status 404 returned error can't find the container with id 77c1b33938a6400b64f4d3bcb35cf1c2af1511d673e5fb5f30e2308fc706e979 Jan 27 12:33:21 crc kubenswrapper[4900]: I0127 12:33:21.498962 4900 generic.go:334] "Generic (PLEG): container finished" podID="3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3" containerID="2809a6e68582b0b93614d18ac87823cac6152b2749df0779a5dd0a0f0d5e619f" exitCode=0 Jan 27 12:33:21 crc kubenswrapper[4900]: I0127 12:33:21.499045 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-kjnxp" event={"ID":"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3","Type":"ContainerDied","Data":"2809a6e68582b0b93614d18ac87823cac6152b2749df0779a5dd0a0f0d5e619f"} Jan 27 12:33:21 crc kubenswrapper[4900]: I0127 12:33:21.506446 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" event={"ID":"a3f86dbd-dce0-4546-8668-e235cc7b5b2d","Type":"ContainerStarted","Data":"77c1b33938a6400b64f4d3bcb35cf1c2af1511d673e5fb5f30e2308fc706e979"} Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.379130 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.379216 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.379273 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.379974 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef8a105b9e3140aef604e358616ef8284c481fa15985cc985f8c4ca4dcabda76"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.380035 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://ef8a105b9e3140aef604e358616ef8284c481fa15985cc985f8c4ca4dcabda76" gracePeriod=600 Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.520108 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="ef8a105b9e3140aef604e358616ef8284c481fa15985cc985f8c4ca4dcabda76" exitCode=0 Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.520186 4900 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"ef8a105b9e3140aef604e358616ef8284c481fa15985cc985f8c4ca4dcabda76"} Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.520224 4900 scope.go:117] "RemoveContainer" containerID="799d1a174787b0e5e1d0fbe4dd1d8784b4f6076c631410ab8be8af90a5f4fc67" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.525899 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-kjnxp" event={"ID":"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3","Type":"ContainerStarted","Data":"a2e2b5afa310bea90524b96fb9a3efe31de92788f36e2976b362f7506e614bb1"} Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.784455 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-644c4585f7-mjqr6"] Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.785883 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.844813 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-644c4585f7-mjqr6"] Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.895319 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-config\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.895398 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-trusted-ca-bundle\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.895441 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-oauth-config\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.895467 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-oauth-serving-cert\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.895485 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ltk7\" (UniqueName: \"kubernetes.io/projected/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-kube-api-access-4ltk7\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.895505 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-serving-cert\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.895519 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-service-ca\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.997043 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-oauth-config\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.997134 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-oauth-serving-cert\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.997159 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ltk7\" (UniqueName: \"kubernetes.io/projected/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-kube-api-access-4ltk7\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.997185 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-serving-cert\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.997207 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-service-ca\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.997302 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-config\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.997330 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-trusted-ca-bundle\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.998434 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-oauth-serving-cert\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.998485 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-service-ca\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.998520 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-config\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:22 crc kubenswrapper[4900]: I0127 12:33:22.999303 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-trusted-ca-bundle\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.005050 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-serving-cert\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.012911 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-oauth-config\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.019134 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ltk7\" (UniqueName: \"kubernetes.io/projected/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-kube-api-access-4ltk7\") pod \"console-644c4585f7-mjqr6\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") " pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.100880 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-644c4585f7-mjqr6" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.431016 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-7dbbbb77f-fjj4n"] Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.431847 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.435396 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-7dbbbb77f-fjj4n"] Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.436701 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.437864 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.438135 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-9qni3tjn1a3p8" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.438289 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.438458 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.439328 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-7k4rq" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.607410 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61ef39b0-502c-45d5-be3a-e11c6ae19d59-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.607580 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-png2m\" (UniqueName: \"kubernetes.io/projected/61ef39b0-502c-45d5-be3a-e11c6ae19d59-kube-api-access-png2m\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.607618 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-client-ca-bundle\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.607649 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-secret-metrics-client-certs\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.607937 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/61ef39b0-502c-45d5-be3a-e11c6ae19d59-audit-log\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 
crc kubenswrapper[4900]: I0127 12:33:23.608069 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-secret-metrics-server-tls\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.608205 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/61ef39b0-502c-45d5-be3a-e11c6ae19d59-metrics-server-audit-profiles\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.711923 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61ef39b0-502c-45d5-be3a-e11c6ae19d59-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.712325 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-png2m\" (UniqueName: \"kubernetes.io/projected/61ef39b0-502c-45d5-be3a-e11c6ae19d59-kube-api-access-png2m\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.712353 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-client-ca-bundle\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.712378 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-secret-metrics-client-certs\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.712433 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/61ef39b0-502c-45d5-be3a-e11c6ae19d59-audit-log\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.712456 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-secret-metrics-server-tls\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.712481 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/61ef39b0-502c-45d5-be3a-e11c6ae19d59-metrics-server-audit-profiles\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.713918 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61ef39b0-502c-45d5-be3a-e11c6ae19d59-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.714317 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/61ef39b0-502c-45d5-be3a-e11c6ae19d59-metrics-server-audit-profiles\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.714652 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/61ef39b0-502c-45d5-be3a-e11c6ae19d59-audit-log\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.717964 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-secret-metrics-client-certs\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.719975 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-client-ca-bundle\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.720723 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/61ef39b0-502c-45d5-be3a-e11c6ae19d59-secret-metrics-server-tls\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.731798 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-png2m\" (UniqueName: \"kubernetes.io/projected/61ef39b0-502c-45d5-be3a-e11c6ae19d59-kube-api-access-png2m\") pod \"metrics-server-7dbbbb77f-fjj4n\" (UID: \"61ef39b0-502c-45d5-be3a-e11c6ae19d59\") " pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.753190 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.801835 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk"] Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.807411 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk"] Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.809811 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.817986 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.818388 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Jan 27 12:33:23 crc kubenswrapper[4900]: I0127 12:33:23.927564 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/654e1706-9908-41f6-ba56-90e58ff3f665-monitoring-plugin-cert\") pod \"monitoring-plugin-66c88bc574-zpdfk\" (UID: \"654e1706-9908-41f6-ba56-90e58ff3f665\") " pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.029331 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/654e1706-9908-41f6-ba56-90e58ff3f665-monitoring-plugin-cert\") pod \"monitoring-plugin-66c88bc574-zpdfk\" (UID: \"654e1706-9908-41f6-ba56-90e58ff3f665\") " pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.092155 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/654e1706-9908-41f6-ba56-90e58ff3f665-monitoring-plugin-cert\") pod \"monitoring-plugin-66c88bc574-zpdfk\" (UID: \"654e1706-9908-41f6-ba56-90e58ff3f665\") " pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.142894 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.287587 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-644c4585f7-mjqr6"] Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.570089 4900 generic.go:334] "Generic (PLEG): container finished" podID="5fbc3e87-c231-4ae3-8e9b-53266dc5269d" containerID="4d5cd2e7ea8618d52c181260927108d3a3faddd828f4f5a6752028d265715b1d" exitCode=0 Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.570419 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"5fbc3e87-c231-4ae3-8e9b-53266dc5269d","Type":"ContainerDied","Data":"4d5cd2e7ea8618d52c181260927108d3a3faddd828f4f5a6752028d265715b1d"} Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.600878 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk"] Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.625327 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" event={"ID":"2525752e-b31e-4e09-8376-6bdf714537f9","Type":"ContainerStarted","Data":"c5a4ebf8d6b64347330eef29c3d122559418806db576940f4fb9ec2251db19c5"} Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.625398 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" event={"ID":"2525752e-b31e-4e09-8376-6bdf714537f9","Type":"ContainerStarted","Data":"bac4d0487485951794b34201ade5204c4aaf92d3dbb02c550ac8d3f5012085da"} Jan 27 12:33:24 crc kubenswrapper[4900]: W0127 12:33:24.626772 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod654e1706_9908_41f6_ba56_90e58ff3f665.slice/crio-ac1da5ea021873f50defdd0af06f1e865d8ff7189fe41077fb269947f8c42a69 WatchSource:0}: Error finding container ac1da5ea021873f50defdd0af06f1e865d8ff7189fe41077fb269947f8c42a69: Status 404 returned error can't find the container with id ac1da5ea021873f50defdd0af06f1e865d8ff7189fe41077fb269947f8c42a69 Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.648628 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-kjnxp" event={"ID":"3bd80142-7ab6-4ce9-82a0-c91bb1b4b8f3","Type":"ContainerStarted","Data":"65661fea8654f8ef84ef6f78e52dfab2e7704778f247ba9840c84f2a29a2f016"} Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.651597 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-644c4585f7-mjqr6" event={"ID":"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d","Type":"ContainerStarted","Data":"4dc70dcb7749ed90ac63fc0440af624e53fce58c4a5651b2a858dfda755a1209"} Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.658493 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" event={"ID":"68d3baca-f774-4385-a9b8-355441784811","Type":"ContainerStarted","Data":"be0f3fc608b9cbc2e56140ef4a40c07adb4720a25ed85c29546b5680d4617968"} Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.671145 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" podStartSLOduration=3.193273096 podStartE2EDuration="6.671110053s" podCreationTimestamp="2026-01-27 12:33:18 +0000 UTC" firstStartedPulling="2026-01-27 12:33:20.166310975 
+0000 UTC m=+427.403339185" lastFinishedPulling="2026-01-27 12:33:23.644147932 +0000 UTC m=+430.881176142" observedRunningTime="2026-01-27 12:33:24.665707001 +0000 UTC m=+431.902735211" watchObservedRunningTime="2026-01-27 12:33:24.671110053 +0000 UTC m=+431.908138263" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.686248 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"69e7e31d6ba683ecd2f02cfbc9899542c457d918143ad827391c0c3e681ae608"} Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.697636 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.700277 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.710126 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-6gknj" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.710632 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.710924 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-8hpfuur36as0k" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.711181 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.711390 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.711587 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.711732 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.712039 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.712227 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.712352 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.715524 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.715854 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-7dbbbb77f-fjj4n"] Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.717908 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-7j8xx" podStartSLOduration=3.420691863 podStartE2EDuration="6.71786554s" podCreationTimestamp="2026-01-27 12:33:18 +0000 UTC" firstStartedPulling="2026-01-27 12:33:20.37591427 +0000 UTC m=+427.612942480" 
lastFinishedPulling="2026-01-27 12:33:23.673087947 +0000 UTC m=+430.910116157" observedRunningTime="2026-01-27 12:33:24.717422307 +0000 UTC m=+431.954450527" watchObservedRunningTime="2026-01-27 12:33:24.71786554 +0000 UTC m=+431.954893750" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.730276 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.730424 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.749184 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.777094 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-kjnxp" podStartSLOduration=4.774564604 podStartE2EDuration="6.777048629s" podCreationTimestamp="2026-01-27 12:33:18 +0000 UTC" firstStartedPulling="2026-01-27 12:33:18.388510275 +0000 UTC m=+425.625538485" lastFinishedPulling="2026-01-27 12:33:20.39099431 +0000 UTC m=+427.628022510" observedRunningTime="2026-01-27 12:33:24.766411211 +0000 UTC m=+432.003439431" watchObservedRunningTime="2026-01-27 12:33:24.777048629 +0000 UTC m=+432.014076839" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.796219 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-644c4585f7-mjqr6" podStartSLOduration=2.796189161 podStartE2EDuration="2.796189161s" podCreationTimestamp="2026-01-27 12:33:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:33:24.785932974 +0000 UTC m=+432.022961204" watchObservedRunningTime="2026-01-27 12:33:24.796189161 +0000 UTC m=+432.033217371" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851644 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f3eb0985-f56f-4111-86c6-d511433058c0-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851686 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851706 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-web-config\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851747 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: 
\"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851768 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851787 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f3eb0985-f56f-4111-86c6-d511433058c0-config-out\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851804 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851835 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851857 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851874 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851890 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851930 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-config\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851951 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851972 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7z78\" (UniqueName: \"kubernetes.io/projected/f3eb0985-f56f-4111-86c6-d511433058c0-kube-api-access-w7z78\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.851998 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.852015 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.852042 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.852103 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953090 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7z78\" (UniqueName: \"kubernetes.io/projected/f3eb0985-f56f-4111-86c6-d511433058c0-kube-api-access-w7z78\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953174 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953203 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc 
kubenswrapper[4900]: I0127 12:33:24.953226 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953261 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953285 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f3eb0985-f56f-4111-86c6-d511433058c0-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953303 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953323 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-web-config\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953355 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953377 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953393 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f3eb0985-f56f-4111-86c6-d511433058c0-config-out\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953409 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953446 4900 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953473 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953538 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953557 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953592 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-config\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.953621 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.954806 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.955525 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.955599 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 
12:33:24.961311 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.961685 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.962512 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.964600 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.964659 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.966009 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.966101 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.966440 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-config\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.966580 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f3eb0985-f56f-4111-86c6-d511433058c0-config-out\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.967138 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f3eb0985-f56f-4111-86c6-d511433058c0-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.967403 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.967583 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f3eb0985-f56f-4111-86c6-d511433058c0-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.970697 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.970892 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f3eb0985-f56f-4111-86c6-d511433058c0-web-config\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:24 crc kubenswrapper[4900]: I0127 12:33:24.981656 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7z78\" (UniqueName: \"kubernetes.io/projected/f3eb0985-f56f-4111-86c6-d511433058c0-kube-api-access-w7z78\") pod \"prometheus-k8s-0\" (UID: \"f3eb0985-f56f-4111-86c6-d511433058c0\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:25 crc kubenswrapper[4900]: I0127 12:33:25.043755 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:25 crc kubenswrapper[4900]: I0127 12:33:25.544384 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"]
Jan 27 12:33:25 crc kubenswrapper[4900]: I0127 12:33:25.700883 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" event={"ID":"61ef39b0-502c-45d5-be3a-e11c6ae19d59","Type":"ContainerStarted","Data":"eb165f0e1abae2f262085be2b7f0ec4e6b96f9589ce00fc3bbeb72fdb787876b"}
Jan 27 12:33:25 crc kubenswrapper[4900]: I0127 12:33:25.705072 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-644c4585f7-mjqr6" event={"ID":"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d","Type":"ContainerStarted","Data":"ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623"}
Jan 27 12:33:25 crc kubenswrapper[4900]: I0127 12:33:25.717193 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-r5q45" event={"ID":"2525752e-b31e-4e09-8376-6bdf714537f9","Type":"ContainerStarted","Data":"710ebbe2f7b1656a8dcdab106749ab3569a6c8812ac65899601f5ff9251153f1"}
Jan 27 12:33:25 crc kubenswrapper[4900]: I0127 12:33:25.719592 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" event={"ID":"654e1706-9908-41f6-ba56-90e58ff3f665","Type":"ContainerStarted","Data":"ac1da5ea021873f50defdd0af06f1e865d8ff7189fe41077fb269947f8c42a69"}
Jan 27 12:33:25 crc kubenswrapper[4900]: I0127 12:33:25.723830 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"f3eb0985-f56f-4111-86c6-d511433058c0","Type":"ContainerStarted","Data":"a624b51016a95ffe14165ca8c5e3afefc6b813880a6f04f9a30a0b52e276e277"}
Jan 27 12:33:26 crc kubenswrapper[4900]: I0127 12:33:26.728726 4900 generic.go:334] "Generic (PLEG): container finished" podID="f3eb0985-f56f-4111-86c6-d511433058c0" containerID="b547d25c4c128e720f389568b6e728cab7fa03711d32f2a7993cba97726c1409" exitCode=0
Jan 27 12:33:26 crc kubenswrapper[4900]: I0127 12:33:26.728883 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"f3eb0985-f56f-4111-86c6-d511433058c0","Type":"ContainerDied","Data":"b547d25c4c128e720f389568b6e728cab7fa03711d32f2a7993cba97726c1409"}
Jan 27 12:33:29 crc kubenswrapper[4900]: I0127 12:33:29.783398 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"5fbc3e87-c231-4ae3-8e9b-53266dc5269d","Type":"ContainerStarted","Data":"45cc4add9359f7dd7955c91a82b05e4673886891bb8a996918023963f6e1e83d"}
Jan 27 12:33:29 crc kubenswrapper[4900]: I0127 12:33:29.787669 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" event={"ID":"654e1706-9908-41f6-ba56-90e58ff3f665","Type":"ContainerStarted","Data":"627cf9afa39934fb2874638a5752309ccda7cbd8b865e3d2af79e6ec820e254f"}
Jan 27 12:33:29 crc kubenswrapper[4900]: I0127 12:33:29.788530 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk"
Jan 27 12:33:29 crc kubenswrapper[4900]: I0127 12:33:29.797479 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk"
Jan 27 12:33:29 crc kubenswrapper[4900]: I0127 12:33:29.801114 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" event={"ID":"61ef39b0-502c-45d5-be3a-e11c6ae19d59","Type":"ContainerStarted","Data":"99bf9c8acda5d1f77bd4f37089b3e7e853fbbc968d684f9f2b5e55f9baf527a1"}
Jan 27 12:33:29 crc kubenswrapper[4900]: I0127 12:33:29.811733 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" event={"ID":"a3f86dbd-dce0-4546-8668-e235cc7b5b2d","Type":"ContainerStarted","Data":"3e3f28954313b29b0357c6897bf1905923c36a58141b5320d56ce7938496e020"}
Jan 27 12:33:29 crc kubenswrapper[4900]: I0127 12:33:29.836856 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" podStartSLOduration=2.097435173 podStartE2EDuration="6.836831993s" podCreationTimestamp="2026-01-27 12:33:23 +0000 UTC" firstStartedPulling="2026-01-27 12:33:24.630844709 +0000 UTC m=+431.867872919" lastFinishedPulling="2026-01-27 12:33:29.370241529 +0000 UTC m=+436.607269739" observedRunningTime="2026-01-27 12:33:29.83237039 +0000 UTC m=+437.069398600" watchObservedRunningTime="2026-01-27 12:33:29.836831993 +0000 UTC m=+437.073860203"
Jan 27 12:33:29 crc kubenswrapper[4900]: I0127 12:33:29.864701 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" podStartSLOduration=2.218305326 podStartE2EDuration="6.864667925s" podCreationTimestamp="2026-01-27 12:33:23 +0000 UTC" firstStartedPulling="2026-01-27 12:33:24.724579651 +0000 UTC m=+431.961607861" lastFinishedPulling="2026-01-27 12:33:29.37094225 +0000 UTC m=+436.607970460" observedRunningTime="2026-01-27 12:33:29.860126879 +0000 UTC m=+437.097155099" watchObservedRunningTime="2026-01-27 12:33:29.864667925 +0000 UTC m=+437.101696135"
Jan 27 12:33:30 crc kubenswrapper[4900]: I0127 12:33:30.829845 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" event={"ID":"a3f86dbd-dce0-4546-8668-e235cc7b5b2d","Type":"ContainerStarted","Data":"37fe964bf0f0eb48ec217582112599fdcd7a25c0c1be05b96d3aed9c37b5a01f"}
Jan 27 12:33:30 crc kubenswrapper[4900]: I0127 12:33:30.830172 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" event={"ID":"a3f86dbd-dce0-4546-8668-e235cc7b5b2d","Type":"ContainerStarted","Data":"fde35bdca1723f1eaa8ae229bae851020963d5887677f2fe567db2fc90e45299"}
Jan 27 12:33:30 crc kubenswrapper[4900]: I0127 12:33:30.833726 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"5fbc3e87-c231-4ae3-8e9b-53266dc5269d","Type":"ContainerStarted","Data":"a8845fbb0e9ef60c0c0dbf7fe929306661fd304053a95fc64e21a26bafa14c78"}
Jan 27 12:33:30 crc kubenswrapper[4900]: I0127 12:33:30.833798 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"5fbc3e87-c231-4ae3-8e9b-53266dc5269d","Type":"ContainerStarted","Data":"0dd024f71872c775e49807f0061c263af62a6edf98d487dd70a21e54f9e298b6"}
Jan 27 12:33:30 crc kubenswrapper[4900]: I0127 12:33:30.833812 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"5fbc3e87-c231-4ae3-8e9b-53266dc5269d","Type":"ContainerStarted","Data":"c889b0e2543797f9a161156f3966800d8d416867aee1a635511b9ce936c18604"}
Jan 27 12:33:30 crc kubenswrapper[4900]: I0127 12:33:30.833829 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"5fbc3e87-c231-4ae3-8e9b-53266dc5269d","Type":"ContainerStarted","Data":"83787833cd800978353e88fb8c850dba76fcb5b50be95135f3f813cfe15889dc"}
Jan 27 12:33:32 crc kubenswrapper[4900]: I0127 12:33:32.969422 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"f3eb0985-f56f-4111-86c6-d511433058c0","Type":"ContainerStarted","Data":"2641adc1c48248b8575f1d4ede2e1d6e5513a2e6aca79522238b24f72e41aee8"}
Jan 27 12:33:32 crc kubenswrapper[4900]: I0127 12:33:32.970231 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"f3eb0985-f56f-4111-86c6-d511433058c0","Type":"ContainerStarted","Data":"8936efe71b62367a0f12387dde65081963bda8bae82e7e19cd1a74a666a0858f"}
Jan 27 12:33:32 crc kubenswrapper[4900]: I0127 12:33:32.970245 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"f3eb0985-f56f-4111-86c6-d511433058c0","Type":"ContainerStarted","Data":"13eccb0dd8a88f08e66835ed7598480a62ee8d165a1afd8559d697a1ee8ff417"}
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.101829 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-644c4585f7-mjqr6"
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.101981 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-644c4585f7-mjqr6"
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.113653 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-644c4585f7-mjqr6"
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.982601 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"f3eb0985-f56f-4111-86c6-d511433058c0","Type":"ContainerStarted","Data":"84ce39d978ea68ac53e1111ea2cbe6998b8d437377524d5c469edf3e3a60da1c"}
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.983984 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"f3eb0985-f56f-4111-86c6-d511433058c0","Type":"ContainerStarted","Data":"ac8bf3cb6ceae957de0fec23236ad7c039b0ff9d75815832d5371efa214daadb"}
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.984117 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"f3eb0985-f56f-4111-86c6-d511433058c0","Type":"ContainerStarted","Data":"d180c43a122a8afaeaf2cbe45524fcdebb913dd561033b1a339995b073dd3212"}
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.988353 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" event={"ID":"a3f86dbd-dce0-4546-8668-e235cc7b5b2d","Type":"ContainerStarted","Data":"d2984ce9b086b100489d25875bb91503a48fc39d3b1df00bf9360ead99295f40"}
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.988394 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" event={"ID":"a3f86dbd-dce0-4546-8668-e235cc7b5b2d","Type":"ContainerStarted","Data":"c45b63bfbd26809ee35d24f95413dc2d370b9ad84d49a32f4f710d70ad77e7e2"}
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.988408 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" event={"ID":"a3f86dbd-dce0-4546-8668-e235cc7b5b2d","Type":"ContainerStarted","Data":"e90db0153e2d7d1b7ef49b61d7dfab102e9869d93a42c78c43c304a88a11d2c3"}
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.988547 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6"
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.992821 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"5fbc3e87-c231-4ae3-8e9b-53266dc5269d","Type":"ContainerStarted","Data":"e1159e49dd5cb009c2e9168dbf65d6bee1d6d520939caea08f7acc0f4365b3a4"}
Jan 27 12:33:33 crc kubenswrapper[4900]: I0127 12:33:33.996560 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-644c4585f7-mjqr6"
Jan 27 12:33:34 crc kubenswrapper[4900]: I0127 12:33:34.023434 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=4.762117086 podStartE2EDuration="10.023397811s" podCreationTimestamp="2026-01-27 12:33:24 +0000 UTC" firstStartedPulling="2026-01-27 12:33:26.730500909 +0000 UTC m=+433.967529129" lastFinishedPulling="2026-01-27 12:33:31.991781644 +0000 UTC m=+439.228809854" observedRunningTime="2026-01-27 12:33:34.016643719 +0000 UTC m=+441.253671949" watchObservedRunningTime="2026-01-27 12:33:34.023397811 +0000 UTC m=+441.260426021"
Jan 27 12:33:34 crc kubenswrapper[4900]: I0127 12:33:34.085656 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=1.943324036 podStartE2EDuration="15.08562582s" podCreationTimestamp="2026-01-27 12:33:19 +0000 UTC" firstStartedPulling="2026-01-27 12:33:20.158481641 +0000 UTC m=+427.395509851" lastFinishedPulling="2026-01-27 12:33:33.300783435 +0000 UTC m=+440.537811635" observedRunningTime="2026-01-27 12:33:34.05682659 +0000 UTC m=+441.293854800" watchObservedRunningTime="2026-01-27 12:33:34.08562582 +0000 UTC m=+441.322654030"
Jan 27 12:33:34 crc kubenswrapper[4900]: I0127 12:33:34.131717 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" podStartSLOduration=1.6947994240000002 podStartE2EDuration="14.131686527s" podCreationTimestamp="2026-01-27 12:33:20 +0000 UTC" firstStartedPulling="2026-01-27 12:33:20.846596735 +0000 UTC m=+428.083624945" lastFinishedPulling="2026-01-27 12:33:33.283483838 +0000 UTC m=+440.520512048" observedRunningTime="2026-01-27 12:33:34.12076542 +0000 UTC m=+441.357793640" watchObservedRunningTime="2026-01-27 12:33:34.131686527 +0000 UTC m=+441.368714737"
Jan 27 12:33:34 crc kubenswrapper[4900]: I0127 12:33:34.137801 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5zbpj"]
Jan 27 12:33:35 crc kubenswrapper[4900]: I0127 12:33:35.030278 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6"
Jan 27 12:33:35 crc kubenswrapper[4900]: I0127 12:33:35.045626 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:33:43 crc kubenswrapper[4900]: I0127 12:33:43.753357 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n"
Jan 27 12:33:43 crc kubenswrapper[4900]: I0127 12:33:43.753984 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n"
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.185632 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-5zbpj" podUID="6893cb7a-209f-4822-9e82-34ad39c7647f" containerName="console" containerID="cri-o://50743efa7a52672831dd486b237ed89506e5fb9f872c1518d1a7e681a850be61" gracePeriod=15
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.535800 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5zbpj_6893cb7a-209f-4822-9e82-34ad39c7647f/console/0.log"
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.535854 4900 generic.go:334] "Generic (PLEG): container finished" podID="6893cb7a-209f-4822-9e82-34ad39c7647f" containerID="50743efa7a52672831dd486b237ed89506e5fb9f872c1518d1a7e681a850be61" exitCode=2
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.535927 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5zbpj" event={"ID":"6893cb7a-209f-4822-9e82-34ad39c7647f","Type":"ContainerDied","Data":"50743efa7a52672831dd486b237ed89506e5fb9f872c1518d1a7e681a850be61"}
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.535954 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5zbpj" event={"ID":"6893cb7a-209f-4822-9e82-34ad39c7647f","Type":"ContainerDied","Data":"9bc72981497f9fae182be4cf549f713490b6afca9f945dfd443674a18c7e4c8a"}
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.535969 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bc72981497f9fae182be4cf549f713490b6afca9f945dfd443674a18c7e4c8a"
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.555417 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5zbpj_6893cb7a-209f-4822-9e82-34ad39c7647f/console/0.log"
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.555502 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5zbpj"
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.658723 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-console-config\") pod \"6893cb7a-209f-4822-9e82-34ad39c7647f\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") "
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.658978 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-oauth-serving-cert\") pod \"6893cb7a-209f-4822-9e82-34ad39c7647f\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") "
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.659100 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-serving-cert\") pod \"6893cb7a-209f-4822-9e82-34ad39c7647f\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") "
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.659130 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drr87\" (UniqueName: \"kubernetes.io/projected/6893cb7a-209f-4822-9e82-34ad39c7647f-kube-api-access-drr87\") pod \"6893cb7a-209f-4822-9e82-34ad39c7647f\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") "
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.659154 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-trusted-ca-bundle\") pod \"6893cb7a-209f-4822-9e82-34ad39c7647f\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") "
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.659213 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-service-ca\") pod \"6893cb7a-209f-4822-9e82-34ad39c7647f\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") "
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.659253 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-oauth-config\") pod \"6893cb7a-209f-4822-9e82-34ad39c7647f\" (UID: \"6893cb7a-209f-4822-9e82-34ad39c7647f\") "
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.659714 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "6893cb7a-209f-4822-9e82-34ad39c7647f" (UID: "6893cb7a-209f-4822-9e82-34ad39c7647f"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.660023 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-console-config" (OuterVolumeSpecName: "console-config") pod "6893cb7a-209f-4822-9e82-34ad39c7647f" (UID: "6893cb7a-209f-4822-9e82-34ad39c7647f"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.660114 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-service-ca" (OuterVolumeSpecName: "service-ca") pod "6893cb7a-209f-4822-9e82-34ad39c7647f" (UID: "6893cb7a-209f-4822-9e82-34ad39c7647f"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.660387 4900 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-service-ca\") on node \"crc\" DevicePath \"\""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.660408 4900 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-console-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.660419 4900 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.660996 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6893cb7a-209f-4822-9e82-34ad39c7647f" (UID: "6893cb7a-209f-4822-9e82-34ad39c7647f"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.664745 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "6893cb7a-209f-4822-9e82-34ad39c7647f" (UID: "6893cb7a-209f-4822-9e82-34ad39c7647f"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.665410 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6893cb7a-209f-4822-9e82-34ad39c7647f-kube-api-access-drr87" (OuterVolumeSpecName: "kube-api-access-drr87") pod "6893cb7a-209f-4822-9e82-34ad39c7647f" (UID: "6893cb7a-209f-4822-9e82-34ad39c7647f"). InnerVolumeSpecName "kube-api-access-drr87". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.665414 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "6893cb7a-209f-4822-9e82-34ad39c7647f" (UID: "6893cb7a-209f-4822-9e82-34ad39c7647f"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.762039 4900 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.762120 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drr87\" (UniqueName: \"kubernetes.io/projected/6893cb7a-209f-4822-9e82-34ad39c7647f-kube-api-access-drr87\") on node \"crc\" DevicePath \"\""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.762135 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6893cb7a-209f-4822-9e82-34ad39c7647f-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 12:33:59 crc kubenswrapper[4900]: I0127 12:33:59.762147 4900 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6893cb7a-209f-4822-9e82-34ad39c7647f-console-oauth-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:34:00 crc kubenswrapper[4900]: I0127 12:34:00.541555 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5zbpj"
Jan 27 12:34:00 crc kubenswrapper[4900]: I0127 12:34:00.568336 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5zbpj"]
Jan 27 12:34:00 crc kubenswrapper[4900]: I0127 12:34:00.573520 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5zbpj"]
Jan 27 12:34:02 crc kubenswrapper[4900]: I0127 12:34:02.491174 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6893cb7a-209f-4822-9e82-34ad39c7647f" path="/var/lib/kubelet/pods/6893cb7a-209f-4822-9e82-34ad39c7647f/volumes"
Jan 27 12:34:03 crc kubenswrapper[4900]: I0127 12:34:03.759255 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n"
Jan 27 12:34:03 crc kubenswrapper[4900]: I0127 12:34:03.763404 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n"
Jan 27 12:34:23 crc kubenswrapper[4900]: I0127 12:34:23.799099 4900 scope.go:117] "RemoveContainer" containerID="50743efa7a52672831dd486b237ed89506e5fb9f872c1518d1a7e681a850be61"
Jan 27 12:34:25 crc kubenswrapper[4900]: I0127 12:34:25.045870 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:34:25 crc kubenswrapper[4900]: I0127 12:34:25.082268 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:34:25 crc kubenswrapper[4900]: I0127 12:34:25.729418 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.367287 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-55dbd56b55-fpf9f"]
Jan 27 12:34:46 crc kubenswrapper[4900]: E0127 12:34:46.368420 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6893cb7a-209f-4822-9e82-34ad39c7647f" containerName="console"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.368443 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6893cb7a-209f-4822-9e82-34ad39c7647f" containerName="console"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.368608 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6893cb7a-209f-4822-9e82-34ad39c7647f" containerName="console"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.369504 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.388441 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-55dbd56b55-fpf9f"]
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.548636 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-trusted-ca-bundle\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.548729 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-serving-cert\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.549129 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-service-ca\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.549332 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-oauth-serving-cert\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.549376 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-config\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.549399 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-879nn\" (UniqueName: \"kubernetes.io/projected/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-kube-api-access-879nn\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.549508 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-oauth-config\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.651181 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-service-ca\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.652107 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-service-ca\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.652160 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-oauth-serving-cert\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.652285 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-config\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.652903 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-oauth-serving-cert\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.652981 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-config\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.653031 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-879nn\" (UniqueName: \"kubernetes.io/projected/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-kube-api-access-879nn\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.653116 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-oauth-config\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.653877 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-trusted-ca-bundle\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.653908 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-serving-cert\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.654925 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-trusted-ca-bundle\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.659685 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-oauth-config\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.660840 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-serving-cert\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.672486 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-879nn\" (UniqueName: \"kubernetes.io/projected/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-kube-api-access-879nn\") pod \"console-55dbd56b55-fpf9f\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:46 crc kubenswrapper[4900]: I0127 12:34:46.692520 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:47 crc kubenswrapper[4900]: I0127 12:34:47.035833 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-55dbd56b55-fpf9f"]
Jan 27 12:34:47 crc kubenswrapper[4900]: I0127 12:34:47.865385 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-55dbd56b55-fpf9f" event={"ID":"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b","Type":"ContainerStarted","Data":"c371a6a229b6563fbfb2747d61304f2c7da956984a4aee5b83e3bd141e92cb96"}
Jan 27 12:34:47 crc kubenswrapper[4900]: I0127 12:34:47.865845 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-55dbd56b55-fpf9f" event={"ID":"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b","Type":"ContainerStarted","Data":"e270133a38d34981a4bab8125836663a7664a2106742cc1ff6f55cf5958a17a2"}
Jan 27 12:34:47 crc kubenswrapper[4900]: I0127 12:34:47.894669 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-55dbd56b55-fpf9f" podStartSLOduration=1.894633113 podStartE2EDuration="1.894633113s" podCreationTimestamp="2026-01-27 12:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:34:47.889008407 +0000 UTC m=+515.126036637" watchObservedRunningTime="2026-01-27 12:34:47.894633113 +0000 UTC m=+515.131661343"
Jan 27 12:34:56 crc kubenswrapper[4900]: I0127 12:34:56.692888 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:56 crc kubenswrapper[4900]: I0127 12:34:56.693550 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:56 crc kubenswrapper[4900]: I0127 12:34:56.700931 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:56 crc kubenswrapper[4900]: I0127 12:34:56.936177 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-55dbd56b55-fpf9f"
Jan 27 12:34:57 crc kubenswrapper[4900]: I0127 12:34:57.005772 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-644c4585f7-mjqr6"]
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.054564 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-644c4585f7-mjqr6" podUID="872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" containerName="console" containerID="cri-o://ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623" gracePeriod=15
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.385836 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-644c4585f7-mjqr6_872d6b67-3d0d-41e3-9b9f-b8bc451fec7d/console/0.log"
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.386159 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-644c4585f7-mjqr6"
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.445456 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ltk7\" (UniqueName: \"kubernetes.io/projected/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-kube-api-access-4ltk7\") pod \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") "
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.445554 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-config\") pod \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") "
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.445583 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-oauth-serving-cert\") pod \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") "
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.445622 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-oauth-config\") pod \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") "
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.445651 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-trusted-ca-bundle\") pod \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") "
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.445754 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-serving-cert\") pod \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") "
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.445800 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-service-ca\") pod \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\" (UID: \"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d\") "
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.446861 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-config" (OuterVolumeSpecName: "console-config") pod "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" (UID: "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.446883 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" (UID: "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.447043 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-service-ca" (OuterVolumeSpecName: "service-ca") pod "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" (UID: "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.447499 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" (UID: "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.452027 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" (UID: "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.452183 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" (UID: "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.452275 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-kube-api-access-4ltk7" (OuterVolumeSpecName: "kube-api-access-4ltk7") pod "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" (UID: "872d6b67-3d0d-41e3-9b9f-b8bc451fec7d"). InnerVolumeSpecName "kube-api-access-4ltk7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.547909 4900 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.547963 4900 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-service-ca\") on node \"crc\" DevicePath \"\""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.547977 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ltk7\" (UniqueName: \"kubernetes.io/projected/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-kube-api-access-4ltk7\") on node \"crc\" DevicePath \"\""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.547991 4900 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.548005 4900 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.548017 4900 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-console-oauth-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:35:22 crc kubenswrapper[4900]: I0127 12:35:22.548028 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.121586 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-644c4585f7-mjqr6_872d6b67-3d0d-41e3-9b9f-b8bc451fec7d/console/0.log"
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.121658 4900 generic.go:334] "Generic (PLEG): container finished" podID="872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" containerID="ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623" exitCode=2
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.121703 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-644c4585f7-mjqr6" event={"ID":"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d","Type":"ContainerDied","Data":"ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623"}
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.121800 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-644c4585f7-mjqr6" event={"ID":"872d6b67-3d0d-41e3-9b9f-b8bc451fec7d","Type":"ContainerDied","Data":"4dc70dcb7749ed90ac63fc0440af624e53fce58c4a5651b2a858dfda755a1209"}
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.121836 4900 scope.go:117] "RemoveContainer" containerID="ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623"
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.122035 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-644c4585f7-mjqr6"
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.145621 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-644c4585f7-mjqr6"]
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.146071 4900 scope.go:117] "RemoveContainer" containerID="ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623"
Jan 27 12:35:23 crc kubenswrapper[4900]: E0127 12:35:23.146713 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623\": container with ID starting with ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623 not found: ID does not exist" containerID="ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623"
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.146754 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623"} err="failed to get container status \"ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623\": rpc error: code = NotFound desc = could not find container \"ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623\": container with ID starting with ba795a7342211fb2dbb942eaf2adc1e4b74af73ee2652f689c5e1c756a1ec623 not found: ID does not exist"
Jan 27 12:35:23 crc kubenswrapper[4900]: I0127 12:35:23.152006 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-644c4585f7-mjqr6"]
Jan 27 12:35:24 crc kubenswrapper[4900]: I0127 12:35:24.494189 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" path="/var/lib/kubelet/pods/872d6b67-3d0d-41e3-9b9f-b8bc451fec7d/volumes"
Jan 27 12:35:52 crc kubenswrapper[4900]: I0127 12:35:52.373012 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:35:52 crc kubenswrapper[4900]: I0127 12:35:52.374450 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:36:22 crc kubenswrapper[4900]: I0127 12:36:22.372457 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:36:22 crc kubenswrapper[4900]: I0127 12:36:22.373019 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:36:52 crc kubenswrapper[4900]: I0127 12:36:52.373158 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:36:52 crc kubenswrapper[4900]: I0127 12:36:52.373755 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:36:52 crc kubenswrapper[4900]: I0127 12:36:52.373814 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x"
Jan 27 12:36:52 crc kubenswrapper[4900]: I0127 12:36:52.374574 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"69e7e31d6ba683ecd2f02cfbc9899542c457d918143ad827391c0c3e681ae608"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 12:36:52 crc kubenswrapper[4900]: I0127 12:36:52.374632 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://69e7e31d6ba683ecd2f02cfbc9899542c457d918143ad827391c0c3e681ae608" gracePeriod=600
Jan 27 12:36:52 crc kubenswrapper[4900]: I0127 12:36:52.742745 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="69e7e31d6ba683ecd2f02cfbc9899542c457d918143ad827391c0c3e681ae608" exitCode=0
Jan 27 12:36:52 crc kubenswrapper[4900]: I0127 12:36:52.742809 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"69e7e31d6ba683ecd2f02cfbc9899542c457d918143ad827391c0c3e681ae608"}
Jan 27 12:36:52 crc kubenswrapper[4900]: I0127 12:36:52.742898 4900 scope.go:117] "RemoveContainer" containerID="ef8a105b9e3140aef604e358616ef8284c481fa15985cc985f8c4ca4dcabda76"
Jan 27 12:36:53 crc kubenswrapper[4900]: I0127 12:36:53.751725 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"13cb1cf2412c834432c235dfcd87c0da123068bd752d1f31f0e730cd7a97b24e"}
Jan 27 12:37:45 crc kubenswrapper[4900]: I0127 12:37:45.195480 4900 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.217031 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"]
Jan 27 12:38:23 crc kubenswrapper[4900]: E0127 12:38:23.218261 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" containerName="console"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.218293 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" containerName="console"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.218532 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="872d6b67-3d0d-41e3-9b9f-b8bc451fec7d" containerName="console"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.220008 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.228193 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.230723 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"]
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.389041 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.389131 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqvzq\" (UniqueName: \"kubernetes.io/projected/fa0b888e-d846-473e-a436-c1e24be0e115-kube-api-access-nqvzq\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.389197 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.491332 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.491430 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqvzq\" (UniqueName: \"kubernetes.io/projected/fa0b888e-d846-473e-a436-c1e24be0e115-kube-api-access-nqvzq\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.491499 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.491886 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.492173 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.511240 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqvzq\" (UniqueName: \"kubernetes.io/projected/fa0b888e-d846-473e-a436-c1e24be0e115-kube-api-access-nqvzq\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.551337 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"
Jan 27 12:38:23 crc kubenswrapper[4900]: I0127 12:38:23.905191 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx"]
Jan 27 12:38:24 crc kubenswrapper[4900]: I0127 12:38:24.430638 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx" event={"ID":"fa0b888e-d846-473e-a436-c1e24be0e115","Type":"ContainerStarted","Data":"c05449a8be3999e653cc33bfdca969f677557db3d2f6243c3dc3acc58525e24e"}
Jan 27 12:38:24 crc kubenswrapper[4900]: I0127 12:38:24.431091 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx" event={"ID":"fa0b888e-d846-473e-a436-c1e24be0e115","Type":"ContainerStarted","Data":"9fcf6700c52c3e201b43c95e0f81b14e36b79cec1b6c4fa660310ed9cc6d7671"}
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.439578 4900 generic.go:334] "Generic (PLEG): container finished" podID="fa0b888e-d846-473e-a436-c1e24be0e115" containerID="c05449a8be3999e653cc33bfdca969f677557db3d2f6243c3dc3acc58525e24e" exitCode=0
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.439705 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx" event={"ID":"fa0b888e-d846-473e-a436-c1e24be0e115","Type":"ContainerDied","Data":"c05449a8be3999e653cc33bfdca969f677557db3d2f6243c3dc3acc58525e24e"}
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.443006 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.556156 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2pmrh"]
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.558071 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.563282 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2pmrh"]
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.727015 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-utilities\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.727093 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfkhj\" (UniqueName: \"kubernetes.io/projected/ae12c605-62a3-49ee-86ad-21a6566672f7-kube-api-access-zfkhj\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.727287 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-catalog-content\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.828360 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-utilities\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.828425 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfkhj\" (UniqueName: \"kubernetes.io/projected/ae12c605-62a3-49ee-86ad-21a6566672f7-kube-api-access-zfkhj\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.828511 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-catalog-content\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.829300 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-utilities\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.829388 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-catalog-content\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") "
pod="openshift-marketplace/redhat-operators-2pmrh" Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.851083 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfkhj\" (UniqueName: \"kubernetes.io/projected/ae12c605-62a3-49ee-86ad-21a6566672f7-kube-api-access-zfkhj\") pod \"redhat-operators-2pmrh\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " pod="openshift-marketplace/redhat-operators-2pmrh" Jan 27 12:38:25 crc kubenswrapper[4900]: I0127 12:38:25.887168 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pmrh" Jan 27 12:38:26 crc kubenswrapper[4900]: I0127 12:38:26.622070 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2pmrh"] Jan 27 12:38:26 crc kubenswrapper[4900]: W0127 12:38:26.685418 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae12c605_62a3_49ee_86ad_21a6566672f7.slice/crio-8a02fcd526b403b937d86c51e72090c36b5a20f7634ba335196eb86c210f0eb7 WatchSource:0}: Error finding container 8a02fcd526b403b937d86c51e72090c36b5a20f7634ba335196eb86c210f0eb7: Status 404 returned error can't find the container with id 8a02fcd526b403b937d86c51e72090c36b5a20f7634ba335196eb86c210f0eb7 Jan 27 12:38:27 crc kubenswrapper[4900]: I0127 12:38:27.455290 4900 generic.go:334] "Generic (PLEG): container finished" podID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerID="f40787e1bcb1bff551be516b29ab49da84e911ce59ca174afe12c7de92e28402" exitCode=0 Jan 27 12:38:27 crc kubenswrapper[4900]: I0127 12:38:27.455376 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pmrh" event={"ID":"ae12c605-62a3-49ee-86ad-21a6566672f7","Type":"ContainerDied","Data":"f40787e1bcb1bff551be516b29ab49da84e911ce59ca174afe12c7de92e28402"} Jan 27 12:38:27 crc kubenswrapper[4900]: I0127 12:38:27.455750 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pmrh" event={"ID":"ae12c605-62a3-49ee-86ad-21a6566672f7","Type":"ContainerStarted","Data":"8a02fcd526b403b937d86c51e72090c36b5a20f7634ba335196eb86c210f0eb7"} Jan 27 12:38:27 crc kubenswrapper[4900]: I0127 12:38:27.457231 4900 generic.go:334] "Generic (PLEG): container finished" podID="fa0b888e-d846-473e-a436-c1e24be0e115" containerID="fe1cd27917debb0f57bb2869118be14cabe5a72afe52fab65ee8177890bcbf1a" exitCode=0 Jan 27 12:38:27 crc kubenswrapper[4900]: I0127 12:38:27.457305 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx" event={"ID":"fa0b888e-d846-473e-a436-c1e24be0e115","Type":"ContainerDied","Data":"fe1cd27917debb0f57bb2869118be14cabe5a72afe52fab65ee8177890bcbf1a"} Jan 27 12:38:28 crc kubenswrapper[4900]: I0127 12:38:28.467646 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pmrh" event={"ID":"ae12c605-62a3-49ee-86ad-21a6566672f7","Type":"ContainerStarted","Data":"f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939"} Jan 27 12:38:28 crc kubenswrapper[4900]: I0127 12:38:28.470295 4900 generic.go:334] "Generic (PLEG): container finished" podID="fa0b888e-d846-473e-a436-c1e24be0e115" containerID="7dea2f5c003ba00b87a15f57d2a35c17253f422e22ef008bbd6bda7448c5d989" exitCode=0 Jan 27 12:38:28 crc kubenswrapper[4900]: I0127 12:38:28.470339 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx" event={"ID":"fa0b888e-d846-473e-a436-c1e24be0e115","Type":"ContainerDied","Data":"7dea2f5c003ba00b87a15f57d2a35c17253f422e22ef008bbd6bda7448c5d989"} Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.060904 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx" Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.407524 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-util\") pod \"fa0b888e-d846-473e-a436-c1e24be0e115\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.407950 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqvzq\" (UniqueName: \"kubernetes.io/projected/fa0b888e-d846-473e-a436-c1e24be0e115-kube-api-access-nqvzq\") pod \"fa0b888e-d846-473e-a436-c1e24be0e115\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.408023 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-bundle\") pod \"fa0b888e-d846-473e-a436-c1e24be0e115\" (UID: \"fa0b888e-d846-473e-a436-c1e24be0e115\") " Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.410711 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-bundle" (OuterVolumeSpecName: "bundle") pod "fa0b888e-d846-473e-a436-c1e24be0e115" (UID: "fa0b888e-d846-473e-a436-c1e24be0e115"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.422151 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-util" (OuterVolumeSpecName: "util") pod "fa0b888e-d846-473e-a436-c1e24be0e115" (UID: "fa0b888e-d846-473e-a436-c1e24be0e115"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.443146 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa0b888e-d846-473e-a436-c1e24be0e115-kube-api-access-nqvzq" (OuterVolumeSpecName: "kube-api-access-nqvzq") pod "fa0b888e-d846-473e-a436-c1e24be0e115" (UID: "fa0b888e-d846-473e-a436-c1e24be0e115"). InnerVolumeSpecName "kube-api-access-nqvzq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.513088 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqvzq\" (UniqueName: \"kubernetes.io/projected/fa0b888e-d846-473e-a436-c1e24be0e115-kube-api-access-nqvzq\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.513314 4900 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.513407 4900 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fa0b888e-d846-473e-a436-c1e24be0e115-util\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.544543 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx" Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.545747 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx" event={"ID":"fa0b888e-d846-473e-a436-c1e24be0e115","Type":"ContainerDied","Data":"9fcf6700c52c3e201b43c95e0f81b14e36b79cec1b6c4fa660310ed9cc6d7671"} Jan 27 12:38:30 crc kubenswrapper[4900]: I0127 12:38:30.545814 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9fcf6700c52c3e201b43c95e0f81b14e36b79cec1b6c4fa660310ed9cc6d7671" Jan 27 12:38:31 crc kubenswrapper[4900]: I0127 12:38:31.553488 4900 generic.go:334] "Generic (PLEG): container finished" podID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerID="f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939" exitCode=0 Jan 27 12:38:31 crc kubenswrapper[4900]: I0127 12:38:31.553544 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pmrh" event={"ID":"ae12c605-62a3-49ee-86ad-21a6566672f7","Type":"ContainerDied","Data":"f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939"} Jan 27 12:38:32 crc kubenswrapper[4900]: I0127 12:38:32.570104 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pmrh" event={"ID":"ae12c605-62a3-49ee-86ad-21a6566672f7","Type":"ContainerStarted","Data":"6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c"} Jan 27 12:38:32 crc kubenswrapper[4900]: I0127 12:38:32.589863 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2pmrh" podStartSLOduration=2.997642611 podStartE2EDuration="7.589833644s" podCreationTimestamp="2026-01-27 12:38:25 +0000 UTC" firstStartedPulling="2026-01-27 12:38:27.457794465 +0000 UTC m=+734.694822675" lastFinishedPulling="2026-01-27 12:38:32.049985488 +0000 UTC m=+739.287013708" observedRunningTime="2026-01-27 12:38:32.588216317 +0000 UTC m=+739.825244537" watchObservedRunningTime="2026-01-27 12:38:32.589833644 +0000 UTC m=+739.826861854" Jan 27 12:38:33 crc kubenswrapper[4900]: I0127 12:38:33.987948 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dqltj"] Jan 27 12:38:33 crc kubenswrapper[4900]: I0127 12:38:33.988584 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" 
podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovn-controller" containerID="cri-o://60da90add614230d95b112403a98a2405640af883ae37b369043fc310facd9c5" gracePeriod=30 Jan 27 12:38:33 crc kubenswrapper[4900]: I0127 12:38:33.988614 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="nbdb" containerID="cri-o://f2d5441aa7877eac34cbb8ff8d85aea47a33853940815aae71cccd0c375c0f0c" gracePeriod=30 Jan 27 12:38:33 crc kubenswrapper[4900]: I0127 12:38:33.988645 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://03cd682fbafb58d28a6ce344c4ae0452c421e5adeea277ce9a2f43ae3739f5a9" gracePeriod=30 Jan 27 12:38:33 crc kubenswrapper[4900]: I0127 12:38:33.988694 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kube-rbac-proxy-node" containerID="cri-o://767483b4ee320dae20ff9a4ad30712ab30ded730d25f7ba2a3cdc94007c8d98d" gracePeriod=30 Jan 27 12:38:33 crc kubenswrapper[4900]: I0127 12:38:33.988728 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovn-acl-logging" containerID="cri-o://977db28e363d05b897c2144d2eaa3ae9ed3d311da4a6f1c16e2b2fe06a8bc5d1" gracePeriod=30 Jan 27 12:38:33 crc kubenswrapper[4900]: I0127 12:38:33.988758 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="northd" containerID="cri-o://3f5fb675be3f19a0f06218c3aefc687f195b3c99c2df38d78b9bd4b3e15b5264" gracePeriod=30 Jan 27 12:38:33 crc kubenswrapper[4900]: I0127 12:38:33.988790 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="sbdb" containerID="cri-o://9f25e3ed7531a11ded9fef3eb5ea317c80bbd3c8474509f4b2cf45c9702fae40" gracePeriod=30 Jan 27 12:38:34 crc kubenswrapper[4900]: I0127 12:38:34.024780 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovnkube-controller" containerID="cri-o://02f786492a42aa6d248f0e4d2bb3278a1e4e49f6bf493122e3c4652f7cb939ae" gracePeriod=30 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.622860 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-twlmq_02bfa799-f281-465d-ab6f-19ea9c16979c/kube-multus/0.log" Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.623340 4900 generic.go:334] "Generic (PLEG): container finished" podID="02bfa799-f281-465d-ab6f-19ea9c16979c" containerID="5409c6fd120f00c9c06539ee7ad612a50874982417a1bf5f14d2867689d31c48" exitCode=2 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.623808 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-twlmq" event={"ID":"02bfa799-f281-465d-ab6f-19ea9c16979c","Type":"ContainerDied","Data":"5409c6fd120f00c9c06539ee7ad612a50874982417a1bf5f14d2867689d31c48"} Jan 27 12:38:35 crc 
kubenswrapper[4900]: I0127 12:38:35.624770 4900 scope.go:117] "RemoveContainer" containerID="5409c6fd120f00c9c06539ee7ad612a50874982417a1bf5f14d2867689d31c48" Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.657944 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dqltj_8f2b59d6-a608-43ab-a496-c2d0b46b6c2f/ovn-acl-logging/0.log" Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.659335 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dqltj_8f2b59d6-a608-43ab-a496-c2d0b46b6c2f/ovn-controller/0.log" Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.659915 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="02f786492a42aa6d248f0e4d2bb3278a1e4e49f6bf493122e3c4652f7cb939ae" exitCode=0 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.659968 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="9f25e3ed7531a11ded9fef3eb5ea317c80bbd3c8474509f4b2cf45c9702fae40" exitCode=0 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.659992 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="f2d5441aa7877eac34cbb8ff8d85aea47a33853940815aae71cccd0c375c0f0c" exitCode=0 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660008 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="3f5fb675be3f19a0f06218c3aefc687f195b3c99c2df38d78b9bd4b3e15b5264" exitCode=0 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660023 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="03cd682fbafb58d28a6ce344c4ae0452c421e5adeea277ce9a2f43ae3739f5a9" exitCode=0 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660032 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="767483b4ee320dae20ff9a4ad30712ab30ded730d25f7ba2a3cdc94007c8d98d" exitCode=0 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660048 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="977db28e363d05b897c2144d2eaa3ae9ed3d311da4a6f1c16e2b2fe06a8bc5d1" exitCode=143 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660101 4900 generic.go:334] "Generic (PLEG): container finished" podID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerID="60da90add614230d95b112403a98a2405640af883ae37b369043fc310facd9c5" exitCode=143 Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660140 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"02f786492a42aa6d248f0e4d2bb3278a1e4e49f6bf493122e3c4652f7cb939ae"} Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660197 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"9f25e3ed7531a11ded9fef3eb5ea317c80bbd3c8474509f4b2cf45c9702fae40"} Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660219 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" 
event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"f2d5441aa7877eac34cbb8ff8d85aea47a33853940815aae71cccd0c375c0f0c"} Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660242 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"3f5fb675be3f19a0f06218c3aefc687f195b3c99c2df38d78b9bd4b3e15b5264"} Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660261 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"03cd682fbafb58d28a6ce344c4ae0452c421e5adeea277ce9a2f43ae3739f5a9"} Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660272 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"767483b4ee320dae20ff9a4ad30712ab30ded730d25f7ba2a3cdc94007c8d98d"} Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660291 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"977db28e363d05b897c2144d2eaa3ae9ed3d311da4a6f1c16e2b2fe06a8bc5d1"} Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.660303 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"60da90add614230d95b112403a98a2405640af883ae37b369043fc310facd9c5"} Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.887918 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2pmrh" Jan 27 12:38:35 crc kubenswrapper[4900]: I0127 12:38:35.888486 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2pmrh" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.651393 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dqltj_8f2b59d6-a608-43ab-a496-c2d0b46b6c2f/ovn-acl-logging/0.log" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.652496 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dqltj_8f2b59d6-a608-43ab-a496-c2d0b46b6c2f/ovn-controller/0.log" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.653439 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.671252 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-twlmq_02bfa799-f281-465d-ab6f-19ea9c16979c/kube-multus/0.log" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.671351 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-twlmq" event={"ID":"02bfa799-f281-465d-ab6f-19ea9c16979c","Type":"ContainerStarted","Data":"7688ed7fbf63aba6a357d389ffd12373edcc9f43b583510c967eda75fe0a2ece"} Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.676389 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dqltj_8f2b59d6-a608-43ab-a496-c2d0b46b6c2f/ovn-acl-logging/0.log" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.676971 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-dqltj_8f2b59d6-a608-43ab-a496-c2d0b46b6c2f/ovn-controller/0.log" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.678245 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" event={"ID":"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f","Type":"ContainerDied","Data":"d7e2abfcc9368485b2086b239d1f164898d71197fb9f86ede4ba99cc1a4ffdd6"} Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.678293 4900 scope.go:117] "RemoveContainer" containerID="02f786492a42aa6d248f0e4d2bb3278a1e4e49f6bf493122e3c4652f7cb939ae" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.678457 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dqltj" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679248 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-netns\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679279 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-openvswitch\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679322 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-node-log\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679346 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-ovn-kubernetes\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679353 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). 
InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679385 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-systemd-units\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679401 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-log-socket\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679406 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679428 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-ovn\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679429 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-node-log" (OuterVolumeSpecName: "node-log") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679438 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679472 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-var-lib-openvswitch\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679506 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-etc-openvswitch\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679469 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-log-socket" (OuterVolumeSpecName: "log-socket") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679513 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679556 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679557 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679544 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-systemd\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679595 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679673 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-env-overrides\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679757 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-config\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679788 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-kubelet\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679836 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-script-lib\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679867 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-slash\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679895 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679931 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2h7s6\" (UniqueName: \"kubernetes.io/projected/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-kube-api-access-2h7s6\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679959 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.679974 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-netd\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680003 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovn-node-metrics-cert\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680007 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680021 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-bin\") pod \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\" (UID: \"8f2b59d6-a608-43ab-a496-c2d0b46b6c2f\") " Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680021 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680050 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680211 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680214 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680535 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-slash" (OuterVolumeSpecName: "host-slash") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680687 4900 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680708 4900 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680727 4900 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-node-log\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680736 4900 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680744 4900 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680756 4900 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-log-socket\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680764 4900 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680778 4900 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680788 4900 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680803 4900 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680812 4900 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680824 4900 reconciler_common.go:293] "Volume detached for volume 
\"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680834 4900 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-slash\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680843 4900 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680856 4900 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.680864 4900 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.681479 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.701352 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.708073 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-kube-api-access-2h7s6" (OuterVolumeSpecName: "kube-api-access-2h7s6") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "kube-api-access-2h7s6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.765280 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" (UID: "8f2b59d6-a608-43ab-a496-c2d0b46b6c2f"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.778627 4900 scope.go:117] "RemoveContainer" containerID="9f25e3ed7531a11ded9fef3eb5ea317c80bbd3c8474509f4b2cf45c9702fae40" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.781306 4900 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.781340 4900 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.781351 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2h7s6\" (UniqueName: \"kubernetes.io/projected/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-kube-api-access-2h7s6\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.781360 4900 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.813246 4900 scope.go:117] "RemoveContainer" containerID="f2d5441aa7877eac34cbb8ff8d85aea47a33853940815aae71cccd0c375c0f0c" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.838443 4900 scope.go:117] "RemoveContainer" containerID="3f5fb675be3f19a0f06218c3aefc687f195b3c99c2df38d78b9bd4b3e15b5264" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.849671 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6m7fv"] Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850015 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kube-rbac-proxy-ovn-metrics" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850040 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kube-rbac-proxy-ovn-metrics" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850074 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovn-acl-logging" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850084 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovn-acl-logging" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850093 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kube-rbac-proxy-node" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850101 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kube-rbac-proxy-node" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850112 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovnkube-controller" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850119 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovnkube-controller" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850136 4900 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="sbdb" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850143 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="sbdb" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850155 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="nbdb" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850162 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="nbdb" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850176 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa0b888e-d846-473e-a436-c1e24be0e115" containerName="util" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850184 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa0b888e-d846-473e-a436-c1e24be0e115" containerName="util" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850197 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="northd" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850204 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="northd" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850211 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa0b888e-d846-473e-a436-c1e24be0e115" containerName="pull" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850218 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa0b888e-d846-473e-a436-c1e24be0e115" containerName="pull" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850227 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kubecfg-setup" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850234 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kubecfg-setup" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850248 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovn-controller" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850256 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovn-controller" Jan 27 12:38:36 crc kubenswrapper[4900]: E0127 12:38:36.850266 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa0b888e-d846-473e-a436-c1e24be0e115" containerName="extract" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850273 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa0b888e-d846-473e-a436-c1e24be0e115" containerName="extract" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850407 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovn-controller" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850421 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kube-rbac-proxy-ovn-metrics" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850430 4900 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="nbdb" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850440 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="kube-rbac-proxy-node" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850453 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="northd" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850461 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa0b888e-d846-473e-a436-c1e24be0e115" containerName="extract" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850475 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovn-acl-logging" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850484 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="ovnkube-controller" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.850497 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" containerName="sbdb" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.869623 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.883571 4900 scope.go:117] "RemoveContainer" containerID="03cd682fbafb58d28a6ce344c4ae0452c421e5adeea277ce9a2f43ae3739f5a9" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887286 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-ovn\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887348 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-cni-bin\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887379 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887429 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-systemd-units\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887458 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9rwg\" (UniqueName: 
\"kubernetes.io/projected/f09de7b8-4cf1-47f1-9e4b-922f0588c458-kube-api-access-m9rwg\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887498 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-kubelet\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887525 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-systemd\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887566 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-cni-netd\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887603 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-run-netns\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887624 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-var-lib-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887655 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-slash\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887678 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887724 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-log-socket\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887771 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovnkube-config\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887825 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-env-overrides\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887857 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovn-node-metrics-cert\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887881 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-node-log\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887908 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-run-ovn-kubernetes\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.887951 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovnkube-script-lib\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.888038 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-etc-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.921112 4900 scope.go:117] "RemoveContainer" containerID="767483b4ee320dae20ff9a4ad30712ab30ded730d25f7ba2a3cdc94007c8d98d" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.944389 4900 scope.go:117] "RemoveContainer" containerID="977db28e363d05b897c2144d2eaa3ae9ed3d311da4a6f1c16e2b2fe06a8bc5d1" Jan 27 12:38:36 crc kubenswrapper[4900]: I0127 12:38:36.944904 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2pmrh" podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="registry-server" probeResult="failure" output=< Jan 27 12:38:36 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:38:36 crc kubenswrapper[4900]: > Jan 27 12:38:37 crc 
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:36.969389 4900 scope.go:117] "RemoveContainer" containerID="60da90add614230d95b112403a98a2405640af883ae37b369043fc310facd9c5"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:36.988285 4900 scope.go:117] "RemoveContainer" containerID="d1fd13558f0a43abfeeefac84c97554bafca6adb58e385bfaa157f4578f37b66"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.052875 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-node-log\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.052931 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-run-ovn-kubernetes\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.052988 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovnkube-script-lib\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053014 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-etc-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053045 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-ovn\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053099 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-cni-bin\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053123 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053150 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-systemd-units\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053175 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-m9rwg\" (UniqueName: \"kubernetes.io/projected/f09de7b8-4cf1-47f1-9e4b-922f0588c458-kube-api-access-m9rwg\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053195 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-kubelet\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053214 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-systemd\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053235 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-cni-netd\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053259 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-run-netns\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053274 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-var-lib-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053298 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-slash\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053308 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-systemd-units\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053354 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053313 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053399 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-etc-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053411 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-log-socket\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053431 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovnkube-config\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053438 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-ovn\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053516 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-env-overrides\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053548 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovn-node-metrics-cert\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053758 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-run-netns\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053264 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-run-ovn-kubernetes\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053926 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-run-systemd\") pod \"ovnkube-node-6m7fv\" (UID: 
\"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053905 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-var-lib-openvswitch\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053872 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovnkube-script-lib\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.054015 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-kubelet\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.054070 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovnkube-config\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.054118 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-slash\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.054145 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-log-socket\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.054172 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-cni-bin\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.053048 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-node-log\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.054206 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: 
I0127 12:38:37.054308 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f09de7b8-4cf1-47f1-9e4b-922f0588c458-host-cni-netd\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.054535 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f09de7b8-4cf1-47f1-9e4b-922f0588c458-env-overrides\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.062674 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f09de7b8-4cf1-47f1-9e4b-922f0588c458-ovn-node-metrics-cert\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.276337 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9rwg\" (UniqueName: \"kubernetes.io/projected/f09de7b8-4cf1-47f1-9e4b-922f0588c458-kube-api-access-m9rwg\") pod \"ovnkube-node-6m7fv\" (UID: \"f09de7b8-4cf1-47f1-9e4b-922f0588c458\") " pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.616733 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.707978 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"520d90ee8cef618263c2fa525419d1d316b4f43ce2f3a107a92e48fabc222d26"} Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.710453 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dqltj"] Jan 27 12:38:37 crc kubenswrapper[4900]: I0127 12:38:37.744143 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dqltj"] Jan 27 12:38:38 crc kubenswrapper[4900]: I0127 12:38:38.491403 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f2b59d6-a608-43ab-a496-c2d0b46b6c2f" path="/var/lib/kubelet/pods/8f2b59d6-a608-43ab-a496-c2d0b46b6c2f/volumes" Jan 27 12:38:38 crc kubenswrapper[4900]: I0127 12:38:38.729297 4900 generic.go:334] "Generic (PLEG): container finished" podID="f09de7b8-4cf1-47f1-9e4b-922f0588c458" containerID="c2b4c5f77687534f340025ad7a592be376638ebd35002d7f15b5bf20c8e1be70" exitCode=0 Jan 27 12:38:38 crc kubenswrapper[4900]: I0127 12:38:38.729372 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerDied","Data":"c2b4c5f77687534f340025ad7a592be376638ebd35002d7f15b5bf20c8e1be70"} Jan 27 12:38:39 crc kubenswrapper[4900]: I0127 12:38:39.738975 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"bbef005da53a13e8863d6adb1a6c66d9bc83b8c83bb9abb7757d8f9447e9d20d"} Jan 27 12:38:39 crc kubenswrapper[4900]: I0127 12:38:39.739043 4900 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"3a5cd6e16a65e663c6499f9c9a5379b8ac31703f3f2e31bb622c44a96a84b172"} Jan 27 12:38:40 crc kubenswrapper[4900]: I0127 12:38:40.750195 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"e975dfe57440cb43d3cacf8e0e99711c098e689fb1cf9f573d39ca5877d09759"} Jan 27 12:38:40 crc kubenswrapper[4900]: I0127 12:38:40.750611 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"b8f7469bcd4987ea07aaa01da5a0a7c9ccf339b00dc52f196e5bef46f2510f7a"} Jan 27 12:38:40 crc kubenswrapper[4900]: I0127 12:38:40.750641 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"c760354e726c33e6a0726f226f301b16fda1ed71c6f9f21086cc0bbb5a2f1cf0"} Jan 27 12:38:40 crc kubenswrapper[4900]: I0127 12:38:40.750668 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"f25a5b4a9765a49f56f33c41dc9d4bccbb97675cc303876311e27bcdb15c80d2"} Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.065125 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"240bcb6cab04b31194c14e7d675b4e64390c0d2d6e576a23b401f4da3589dbf5"} Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.142937 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn"] Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.143965 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.149526 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-jw7b6" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.149527 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.150007 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.268332 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js"] Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.298128 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smwjr\" (UniqueName: \"kubernetes.io/projected/61aa23bf-0ece-4bf6-a963-542bd8b399c6-kube-api-access-smwjr\") pod \"obo-prometheus-operator-68bc856cb9-76fmn\" (UID: \"61aa23bf-0ece-4bf6-a963-542bd8b399c6\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.303897 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.307926 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk"] Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.307958 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-g4dcq" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.308301 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.309684 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.399710 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smwjr\" (UniqueName: \"kubernetes.io/projected/61aa23bf-0ece-4bf6-a963-542bd8b399c6-kube-api-access-smwjr\") pod \"obo-prometheus-operator-68bc856cb9-76fmn\" (UID: \"61aa23bf-0ece-4bf6-a963-542bd8b399c6\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.421628 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smwjr\" (UniqueName: \"kubernetes.io/projected/61aa23bf-0ece-4bf6-a963-542bd8b399c6-kube-api-access-smwjr\") pod \"obo-prometheus-operator-68bc856cb9-76fmn\" (UID: \"61aa23bf-0ece-4bf6-a963-542bd8b399c6\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.461317 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.472019 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-p424v"] Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.473614 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.475981 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-mkhhs" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.476747 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.496696 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators_61aa23bf-0ece-4bf6-a963-542bd8b399c6_0(d687d56b515bceace19d97557eb33b92f748e1d6294565308e564b9e5aab2885): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.496798 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators_61aa23bf-0ece-4bf6-a963-542bd8b399c6_0(d687d56b515bceace19d97557eb33b92f748e1d6294565308e564b9e5aab2885): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.496824 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators_61aa23bf-0ece-4bf6-a963-542bd8b399c6_0(d687d56b515bceace19d97557eb33b92f748e1d6294565308e564b9e5aab2885): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.496886 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators(61aa23bf-0ece-4bf6-a963-542bd8b399c6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators(61aa23bf-0ece-4bf6-a963-542bd8b399c6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators_61aa23bf-0ece-4bf6-a963-542bd8b399c6_0(d687d56b515bceace19d97557eb33b92f748e1d6294565308e564b9e5aab2885): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" podUID="61aa23bf-0ece-4bf6-a963-542bd8b399c6" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.502168 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/25cea2e4-2822-42b8-bd98-6a7f99e69c75-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk\" (UID: \"25cea2e4-2822-42b8-bd98-6a7f99e69c75\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.502220 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/70cde964-7f6b-42e4-83f2-87e67664e70c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-sn6js\" (UID: \"70cde964-7f6b-42e4-83f2-87e67664e70c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.502257 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990-observability-operator-tls\") pod \"observability-operator-59bdc8b94-p424v\" (UID: \"6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990\") " pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.502479 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/25cea2e4-2822-42b8-bd98-6a7f99e69c75-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk\" (UID: \"25cea2e4-2822-42b8-bd98-6a7f99e69c75\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.502705 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfkht\" (UniqueName: \"kubernetes.io/projected/6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990-kube-api-access-mfkht\") pod \"observability-operator-59bdc8b94-p424v\" (UID: \"6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990\") " pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.502749 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/70cde964-7f6b-42e4-83f2-87e67664e70c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-sn6js\" (UID: \"70cde964-7f6b-42e4-83f2-87e67664e70c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.604949 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/25cea2e4-2822-42b8-bd98-6a7f99e69c75-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk\" (UID: \"25cea2e4-2822-42b8-bd98-6a7f99e69c75\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.605016 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/70cde964-7f6b-42e4-83f2-87e67664e70c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-sn6js\" (UID: \"70cde964-7f6b-42e4-83f2-87e67664e70c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.605098 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990-observability-operator-tls\") pod \"observability-operator-59bdc8b94-p424v\" (UID: \"6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990\") " pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.605211 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/25cea2e4-2822-42b8-bd98-6a7f99e69c75-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk\" (UID: \"25cea2e4-2822-42b8-bd98-6a7f99e69c75\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.605897 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfkht\" (UniqueName: \"kubernetes.io/projected/6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990-kube-api-access-mfkht\") pod \"observability-operator-59bdc8b94-p424v\" (UID: \"6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990\") " pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.605945 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/70cde964-7f6b-42e4-83f2-87e67664e70c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-sn6js\" (UID: \"70cde964-7f6b-42e4-83f2-87e67664e70c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.614817 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/25cea2e4-2822-42b8-bd98-6a7f99e69c75-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk\" (UID: \"25cea2e4-2822-42b8-bd98-6a7f99e69c75\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.618657 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/25cea2e4-2822-42b8-bd98-6a7f99e69c75-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk\" (UID: \"25cea2e4-2822-42b8-bd98-6a7f99e69c75\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.619130 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/70cde964-7f6b-42e4-83f2-87e67664e70c-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-sn6js\" (UID: \"70cde964-7f6b-42e4-83f2-87e67664e70c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.631683 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/70cde964-7f6b-42e4-83f2-87e67664e70c-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5f87745447-sn6js\" (UID: \"70cde964-7f6b-42e4-83f2-87e67664e70c\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.632001 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990-observability-operator-tls\") pod \"observability-operator-59bdc8b94-p424v\" (UID: \"6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990\") " pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.632092 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.641442 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.649758 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfkht\" (UniqueName: \"kubernetes.io/projected/6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990-kube-api-access-mfkht\") pod \"observability-operator-59bdc8b94-p424v\" (UID: \"6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990\") " pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.687256 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators_70cde964-7f6b-42e4-83f2-87e67664e70c_0(9b74c497f1d4bf4749bb1cbedac405328403ff12afd6e8d6b755ed640bb74f17): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.687336 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators_70cde964-7f6b-42e4-83f2-87e67664e70c_0(9b74c497f1d4bf4749bb1cbedac405328403ff12afd6e8d6b755ed640bb74f17): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.687359 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators_70cde964-7f6b-42e4-83f2-87e67664e70c_0(9b74c497f1d4bf4749bb1cbedac405328403ff12afd6e8d6b755ed640bb74f17): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.687406 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators(70cde964-7f6b-42e4-83f2-87e67664e70c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators(70cde964-7f6b-42e4-83f2-87e67664e70c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators_70cde964-7f6b-42e4-83f2-87e67664e70c_0(9b74c497f1d4bf4749bb1cbedac405328403ff12afd6e8d6b755ed640bb74f17): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" podUID="70cde964-7f6b-42e4-83f2-87e67664e70c" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.701268 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators_25cea2e4-2822-42b8-bd98-6a7f99e69c75_0(3b15bba95dc26c87abab5f9b46e5161ba761cb778cc7ab68131336d11ed5d88c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.701360 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators_25cea2e4-2822-42b8-bd98-6a7f99e69c75_0(3b15bba95dc26c87abab5f9b46e5161ba761cb778cc7ab68131336d11ed5d88c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.701392 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators_25cea2e4-2822-42b8-bd98-6a7f99e69c75_0(3b15bba95dc26c87abab5f9b46e5161ba761cb778cc7ab68131336d11ed5d88c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.701459 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators(25cea2e4-2822-42b8-bd98-6a7f99e69c75)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators(25cea2e4-2822-42b8-bd98-6a7f99e69c75)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators_25cea2e4-2822-42b8-bd98-6a7f99e69c75_0(3b15bba95dc26c87abab5f9b46e5161ba761cb778cc7ab68131336d11ed5d88c): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" podUID="25cea2e4-2822-42b8-bd98-6a7f99e69c75" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.740138 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-cbtnw"] Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.741006 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.746129 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-btdtl" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.814185 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txmdl\" (UniqueName: \"kubernetes.io/projected/b925e9c1-ac78-41d5-a783-88a95ae66df6-kube-api-access-txmdl\") pod \"perses-operator-5bf474d74f-cbtnw\" (UID: \"b925e9c1-ac78-41d5-a783-88a95ae66df6\") " pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.814289 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/b925e9c1-ac78-41d5-a783-88a95ae66df6-openshift-service-ca\") pod \"perses-operator-5bf474d74f-cbtnw\" (UID: \"b925e9c1-ac78-41d5-a783-88a95ae66df6\") " pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.837406 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.873905 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-p424v_openshift-operators_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990_0(aaf0290bc0ca9923e78305a95b4b711d43c0b28ff92921df0d96e7b7c2ff4326): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.874012 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-p424v_openshift-operators_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990_0(aaf0290bc0ca9923e78305a95b4b711d43c0b28ff92921df0d96e7b7c2ff4326): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.874076 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-p424v_openshift-operators_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990_0(aaf0290bc0ca9923e78305a95b4b711d43c0b28ff92921df0d96e7b7c2ff4326): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:44 crc kubenswrapper[4900]: E0127 12:38:44.874144 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-p424v_openshift-operators(6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-p424v_openshift-operators(6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-p424v_openshift-operators_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990_0(aaf0290bc0ca9923e78305a95b4b711d43c0b28ff92921df0d96e7b7c2ff4326): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-p424v" podUID="6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.915627 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txmdl\" (UniqueName: \"kubernetes.io/projected/b925e9c1-ac78-41d5-a783-88a95ae66df6-kube-api-access-txmdl\") pod \"perses-operator-5bf474d74f-cbtnw\" (UID: \"b925e9c1-ac78-41d5-a783-88a95ae66df6\") " pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.916038 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/b925e9c1-ac78-41d5-a783-88a95ae66df6-openshift-service-ca\") pod \"perses-operator-5bf474d74f-cbtnw\" (UID: \"b925e9c1-ac78-41d5-a783-88a95ae66df6\") " pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.917182 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/b925e9c1-ac78-41d5-a783-88a95ae66df6-openshift-service-ca\") pod \"perses-operator-5bf474d74f-cbtnw\" (UID: \"b925e9c1-ac78-41d5-a783-88a95ae66df6\") " pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:44 crc kubenswrapper[4900]: I0127 12:38:44.933436 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txmdl\" (UniqueName: \"kubernetes.io/projected/b925e9c1-ac78-41d5-a783-88a95ae66df6-kube-api-access-txmdl\") pod \"perses-operator-5bf474d74f-cbtnw\" (UID: \"b925e9c1-ac78-41d5-a783-88a95ae66df6\") " pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:45 crc kubenswrapper[4900]: I0127 12:38:45.060952 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:45 crc kubenswrapper[4900]: E0127 12:38:45.521248 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-cbtnw_openshift-operators_b925e9c1-ac78-41d5-a783-88a95ae66df6_0(92bc7b662d2c04b2c32cdd9858452ae107e396e8318e44f25130e110a41fcc7f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Jan 27 12:38:45 crc kubenswrapper[4900]: E0127 12:38:45.521333 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-cbtnw_openshift-operators_b925e9c1-ac78-41d5-a783-88a95ae66df6_0(92bc7b662d2c04b2c32cdd9858452ae107e396e8318e44f25130e110a41fcc7f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw"
Jan 27 12:38:45 crc kubenswrapper[4900]: E0127 12:38:45.521357 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-cbtnw_openshift-operators_b925e9c1-ac78-41d5-a783-88a95ae66df6_0(92bc7b662d2c04b2c32cdd9858452ae107e396e8318e44f25130e110a41fcc7f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw"
Jan 27 12:38:45 crc kubenswrapper[4900]: E0127 12:38:45.521413 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-cbtnw_openshift-operators(b925e9c1-ac78-41d5-a783-88a95ae66df6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-cbtnw_openshift-operators(b925e9c1-ac78-41d5-a783-88a95ae66df6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-cbtnw_openshift-operators_b925e9c1-ac78-41d5-a783-88a95ae66df6_0(92bc7b662d2c04b2c32cdd9858452ae107e396e8318e44f25130e110a41fcc7f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" podUID="b925e9c1-ac78-41d5-a783-88a95ae66df6"
Jan 27 12:38:45 crc kubenswrapper[4900]: I0127 12:38:45.940708 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:45 crc kubenswrapper[4900]: I0127 12:38:45.994880 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2pmrh"
Jan 27 12:38:46 crc kubenswrapper[4900]: I0127 12:38:46.191288 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" event={"ID":"f09de7b8-4cf1-47f1-9e4b-922f0588c458","Type":"ContainerStarted","Data":"89a34d79e346620bd87a0ee4ec89c6a836d473bfab2d657a61c36b5cfdea14d7"}
Jan 27 12:38:46 crc kubenswrapper[4900]: I0127 12:38:46.231962 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" podStartSLOduration=10.231942388 podStartE2EDuration="10.231942388s" podCreationTimestamp="2026-01-27 12:38:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:38:46.2309565 +0000 UTC m=+753.467984700" watchObservedRunningTime="2026-01-27 12:38:46.231942388 +0000 UTC m=+753.468970598"
Jan 27 12:38:46 crc kubenswrapper[4900]: I0127 12:38:46.347545 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2pmrh"]
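[Annotation] The pod_startup_latency_tracker record above carries its own arithmetic: the logged podStartSLOduration equals watchObservedRunningTime minus podCreationTimestamp (no image-pull time is subtracted here, since both pull timestamps are the zero value). A quick check of the logged values:

    // Sketch: reproduce the logged podStartSLOduration from the two
    // timestamps in the same record. Values copied from the log above.
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	created, _ := time.Parse(time.RFC3339, "2026-01-27T12:38:36Z")
    	observed, _ := time.Parse(time.RFC3339Nano, "2026-01-27T12:38:46.231942388Z")
    	fmt.Println(observed.Sub(created)) // 10.231942388s, matching podStartSLOduration
    }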
podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="registry-server" containerID="cri-o://6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c" gracePeriod=2 Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.199094 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.199128 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.199140 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.241772 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.261760 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.490496 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pmrh" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.510895 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-catalog-content\") pod \"ae12c605-62a3-49ee-86ad-21a6566672f7\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.511117 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfkhj\" (UniqueName: \"kubernetes.io/projected/ae12c605-62a3-49ee-86ad-21a6566672f7-kube-api-access-zfkhj\") pod \"ae12c605-62a3-49ee-86ad-21a6566672f7\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.511287 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-utilities\") pod \"ae12c605-62a3-49ee-86ad-21a6566672f7\" (UID: \"ae12c605-62a3-49ee-86ad-21a6566672f7\") " Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.538909 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-utilities" (OuterVolumeSpecName: "utilities") pod "ae12c605-62a3-49ee-86ad-21a6566672f7" (UID: "ae12c605-62a3-49ee-86ad-21a6566672f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.545754 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae12c605-62a3-49ee-86ad-21a6566672f7-kube-api-access-zfkhj" (OuterVolumeSpecName: "kube-api-access-zfkhj") pod "ae12c605-62a3-49ee-86ad-21a6566672f7" (UID: "ae12c605-62a3-49ee-86ad-21a6566672f7"). InnerVolumeSpecName "kube-api-access-zfkhj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.619530 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfkhj\" (UniqueName: \"kubernetes.io/projected/ae12c605-62a3-49ee-86ad-21a6566672f7-kube-api-access-zfkhj\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.619560 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.731320 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae12c605-62a3-49ee-86ad-21a6566672f7" (UID: "ae12c605-62a3-49ee-86ad-21a6566672f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.822602 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae12c605-62a3-49ee-86ad-21a6566672f7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.917703 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn"] Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.917892 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:47 crc kubenswrapper[4900]: I0127 12:38:47.918485 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.016741 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators_61aa23bf-0ece-4bf6-a963-542bd8b399c6_0(02ca84293da2ee95f01beea6dad91edf39081c4d3e7c6ad3b0796ff373b11599): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.016869 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators_61aa23bf-0ece-4bf6-a963-542bd8b399c6_0(02ca84293da2ee95f01beea6dad91edf39081c4d3e7c6ad3b0796ff373b11599): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.016913 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators_61aa23bf-0ece-4bf6-a963-542bd8b399c6_0(02ca84293da2ee95f01beea6dad91edf39081c4d3e7c6ad3b0796ff373b11599): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.017022 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators(61aa23bf-0ece-4bf6-a963-542bd8b399c6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators(61aa23bf-0ece-4bf6-a963-542bd8b399c6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-76fmn_openshift-operators_61aa23bf-0ece-4bf6-a963-542bd8b399c6_0(02ca84293da2ee95f01beea6dad91edf39081c4d3e7c6ad3b0796ff373b11599): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" podUID="61aa23bf-0ece-4bf6-a963-542bd8b399c6" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.034240 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-p424v"] Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.034517 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.035551 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.054031 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk"] Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.054348 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.055261 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.070614 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-cbtnw"] Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.070757 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.071591 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.140624 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js"] Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.140833 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.141524 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.185306 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-p424v_openshift-operators_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990_0(addd2f1c3878a6af5ce97d5bead0336246d32513ed1f2beb2adfcd35698bc286): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.185756 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-p424v_openshift-operators_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990_0(addd2f1c3878a6af5ce97d5bead0336246d32513ed1f2beb2adfcd35698bc286): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.185805 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-p424v_openshift-operators_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990_0(addd2f1c3878a6af5ce97d5bead0336246d32513ed1f2beb2adfcd35698bc286): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-p424v" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.185879 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-p424v_openshift-operators(6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-p424v_openshift-operators(6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-p424v_openshift-operators_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990_0(addd2f1c3878a6af5ce97d5bead0336246d32513ed1f2beb2adfcd35698bc286): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-p424v" podUID="6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.236375 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2pmrh" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.237925 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pmrh" event={"ID":"ae12c605-62a3-49ee-86ad-21a6566672f7","Type":"ContainerDied","Data":"6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c"} Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.238046 4900 scope.go:117] "RemoveContainer" containerID="6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.244599 4900 generic.go:334] "Generic (PLEG): container finished" podID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerID="6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c" exitCode=0 Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.244900 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pmrh" event={"ID":"ae12c605-62a3-49ee-86ad-21a6566672f7","Type":"ContainerDied","Data":"8a02fcd526b403b937d86c51e72090c36b5a20f7634ba335196eb86c210f0eb7"} Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.264434 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators_25cea2e4-2822-42b8-bd98-6a7f99e69c75_0(4fd93aad356ba2a7bbf078c684ce143edb418c3a5e288b1df64f98a564d4eac0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.264575 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators_25cea2e4-2822-42b8-bd98-6a7f99e69c75_0(4fd93aad356ba2a7bbf078c684ce143edb418c3a5e288b1df64f98a564d4eac0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.264620 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators_25cea2e4-2822-42b8-bd98-6a7f99e69c75_0(4fd93aad356ba2a7bbf078c684ce143edb418c3a5e288b1df64f98a564d4eac0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.264724 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators(25cea2e4-2822-42b8-bd98-6a7f99e69c75)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators(25cea2e4-2822-42b8-bd98-6a7f99e69c75)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_openshift-operators_25cea2e4-2822-42b8-bd98-6a7f99e69c75_0(4fd93aad356ba2a7bbf078c684ce143edb418c3a5e288b1df64f98a564d4eac0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" podUID="25cea2e4-2822-42b8-bd98-6a7f99e69c75" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.281921 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators_70cde964-7f6b-42e4-83f2-87e67664e70c_0(aba003959dc209cc148baa4aab1a9602d5a8cb4ef6a45f6492a06c93f02ee7ee): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.282034 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators_70cde964-7f6b-42e4-83f2-87e67664e70c_0(aba003959dc209cc148baa4aab1a9602d5a8cb4ef6a45f6492a06c93f02ee7ee): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.282104 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators_70cde964-7f6b-42e4-83f2-87e67664e70c_0(aba003959dc209cc148baa4aab1a9602d5a8cb4ef6a45f6492a06c93f02ee7ee): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.282188 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators(70cde964-7f6b-42e4-83f2-87e67664e70c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators(70cde964-7f6b-42e4-83f2-87e67664e70c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_openshift-operators_70cde964-7f6b-42e4-83f2-87e67664e70c_0(aba003959dc209cc148baa4aab1a9602d5a8cb4ef6a45f6492a06c93f02ee7ee): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" podUID="70cde964-7f6b-42e4-83f2-87e67664e70c" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.297730 4900 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-cbtnw_openshift-operators_b925e9c1-ac78-41d5-a783-88a95ae66df6_0(943b78d5f1e94b4ce22463106f2aca85e239ed51a2cb7ef4809849d4c6c6460f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.297851 4900 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-cbtnw_openshift-operators_b925e9c1-ac78-41d5-a783-88a95ae66df6_0(943b78d5f1e94b4ce22463106f2aca85e239ed51a2cb7ef4809849d4c6c6460f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.297888 4900 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-cbtnw_openshift-operators_b925e9c1-ac78-41d5-a783-88a95ae66df6_0(943b78d5f1e94b4ce22463106f2aca85e239ed51a2cb7ef4809849d4c6c6460f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.297940 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-cbtnw_openshift-operators(b925e9c1-ac78-41d5-a783-88a95ae66df6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-cbtnw_openshift-operators(b925e9c1-ac78-41d5-a783-88a95ae66df6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-cbtnw_openshift-operators_b925e9c1-ac78-41d5-a783-88a95ae66df6_0(943b78d5f1e94b4ce22463106f2aca85e239ed51a2cb7ef4809849d4c6c6460f): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" podUID="b925e9c1-ac78-41d5-a783-88a95ae66df6" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.322572 4900 scope.go:117] "RemoveContainer" containerID="f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.323172 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2pmrh"] Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.328495 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2pmrh"] Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.372965 4900 scope.go:117] "RemoveContainer" containerID="f40787e1bcb1bff551be516b29ab49da84e911ce59ca174afe12c7de92e28402" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.406831 4900 scope.go:117] "RemoveContainer" containerID="6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.408020 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c\": container with ID starting with 6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c not found: ID does not exist" containerID="6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.408102 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c"} err="failed to get container status \"6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c\": rpc error: code = NotFound desc = could not find container \"6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c\": container with ID starting with 6818abf421c51e5c914e873983efa2ce22664447c85b2bd124f0c3dc8b33dd2c not found: ID does not exist" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.408146 4900 scope.go:117] "RemoveContainer" containerID="f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 12:38:48.408894 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939\": container with ID starting with f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939 not found: ID does not exist" containerID="f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.408981 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939"} err="failed to get container status \"f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939\": rpc error: code = NotFound desc = could not find container \"f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939\": container with ID starting with f00f32f7c97cd3ef34e64532c6a2ac272acd887c4315be32f67b22c8b012e939 not found: ID does not exist" Jan 27 12:38:48 crc kubenswrapper[4900]: I0127 12:38:48.409038 4900 scope.go:117] "RemoveContainer" containerID="f40787e1bcb1bff551be516b29ab49da84e911ce59ca174afe12c7de92e28402" Jan 27 12:38:48 crc kubenswrapper[4900]: E0127 
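Note: the "not found: ID does not exist" errors above are a benign race during teardown of redhat-operators-2pmrh: each container was already gone by the time its status was re-queried, and the kubelet logs the DeleteContainer result at info level and moves on. A minimal sketch of that idempotent-delete convention (an assumed pattern for illustration, not kubelet's actual code), using the same gRPC NotFound code that appears in the log:

// idempotent_delete.go -- minimal sketch (assumed pattern, not kubelet code):
// once a container is already gone, a NotFound from the runtime is success.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer stands in for a CRI RemoveContainer round trip; here it
// simulates the race seen in the log: the container was already removed.
func removeContainer(id string) error {
	return status.Error(codes.NotFound, "could not find container "+id)
}

func cleanup(id string) error {
	if err := removeContainer(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // already deleted by a concurrent path; nothing left to do
		}
		return fmt.Errorf("removing container %s: %w", id, err)
	}
	return nil
}

func main() {
	if err := cleanup("6818abf421c5"); err != nil {
		fmt.Println("cleanup failed:", err)
		return
	}
	fmt.Println("cleanup ok (NotFound treated as already-deleted)")
}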
Jan 27 12:38:52 crc kubenswrapper[4900]: I0127 12:38:52.372885 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:38:52 crc kubenswrapper[4900]: I0127 12:38:52.373602 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:38:59 crc kubenswrapper[4900]: I0127 12:38:59.481678 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw"
Jan 27 12:38:59 crc kubenswrapper[4900]: I0127 12:38:59.481689 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn"
Jan 27 12:38:59 crc kubenswrapper[4900]: I0127 12:38:59.482680 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw"
Jan 27 12:38:59 crc kubenswrapper[4900]: I0127 12:38:59.482748 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn"
Jan 27 12:38:59 crc kubenswrapper[4900]: I0127 12:38:59.823018 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-cbtnw"]
Jan 27 12:38:59 crc kubenswrapper[4900]: W0127 12:38:59.831036 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb925e9c1_ac78_41d5_a783_88a95ae66df6.slice/crio-49fd783b8915f41483703161d39b65af8387f0c2fd586382e0dd67bbc2feab18 WatchSource:0}: Error finding container 49fd783b8915f41483703161d39b65af8387f0c2fd586382e0dd67bbc2feab18: Status 404 returned error can't find the container with id 49fd783b8915f41483703161d39b65af8387f0c2fd586382e0dd67bbc2feab18
Jan 27 12:39:00 crc kubenswrapper[4900]: I0127 12:39:00.079135 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn"]
Jan 27 12:39:00 crc kubenswrapper[4900]: I0127 12:39:00.321721 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" event={"ID":"b925e9c1-ac78-41d5-a783-88a95ae66df6","Type":"ContainerStarted","Data":"49fd783b8915f41483703161d39b65af8387f0c2fd586382e0dd67bbc2feab18"}
Jan 27 12:39:00 crc kubenswrapper[4900]: I0127 12:39:00.322568 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" event={"ID":"61aa23bf-0ece-4bf6-a963-542bd8b399c6","Type":"ContainerStarted","Data":"c42862eaeac2e3d5147ee546e04aea1ae59dc26f27bbb7606fbdae80fe494d55"}
Jan 27 12:39:00 crc kubenswrapper[4900]: I0127 12:39:00.481036 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-p424v"
Jan 27 12:39:00 crc kubenswrapper[4900]: I0127 12:39:00.482320 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-p424v"
Jan 27 12:39:00 crc kubenswrapper[4900]: I0127 12:39:00.687263 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-p424v"]
Jan 27 12:39:00 crc kubenswrapper[4900]: W0127 12:39:00.692827 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ccb6d6c_6d83_4ac9_80ae_7ab1f66fc990.slice/crio-c3a29345cad011f39be8f83f04eb5eac8c9a229d0261f0dab0c3f4bfdd60c2a4 WatchSource:0}: Error finding container c3a29345cad011f39be8f83f04eb5eac8c9a229d0261f0dab0c3f4bfdd60c2a4: Status 404 returned error can't find the container with id c3a29345cad011f39be8f83f04eb5eac8c9a229d0261f0dab0c3f4bfdd60c2a4
Jan 27 12:39:01 crc kubenswrapper[4900]: I0127 12:39:01.332657 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-p424v" event={"ID":"6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990","Type":"ContainerStarted","Data":"c3a29345cad011f39be8f83f04eb5eac8c9a229d0261f0dab0c3f4bfdd60c2a4"}
Jan 27 12:39:03 crc kubenswrapper[4900]: I0127 12:39:03.481455 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js"
Jan 27 12:39:03 crc kubenswrapper[4900]: I0127 12:39:03.483714 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js"
Jan 27 12:39:03 crc kubenswrapper[4900]: I0127 12:39:03.484542 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk"
Jan 27 12:39:03 crc kubenswrapper[4900]: I0127 12:39:03.484920 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk"
Jan 27 12:39:04 crc kubenswrapper[4900]: I0127 12:39:04.037910 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js"]
Jan 27 12:39:04 crc kubenswrapper[4900]: W0127 12:39:04.059554 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70cde964_7f6b_42e4_83f2_87e67664e70c.slice/crio-ffc7ab31cb8253efab04fc2ecdf7b617e3f6bd9a18cc9d87eae74d521c23a709 WatchSource:0}: Error finding container ffc7ab31cb8253efab04fc2ecdf7b617e3f6bd9a18cc9d87eae74d521c23a709: Status 404 returned error can't find the container with id ffc7ab31cb8253efab04fc2ecdf7b617e3f6bd9a18cc9d87eae74d521c23a709
Jan 27 12:39:04 crc kubenswrapper[4900]: I0127 12:39:04.064723 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk"]
Jan 27 12:39:04 crc kubenswrapper[4900]: W0127 12:39:04.081483 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25cea2e4_2822_42b8_bd98_6a7f99e69c75.slice/crio-2e651fce81567ae24aa91d7eedf0d4f450b84cb58cdfeb7f6c7f2ec237dc939b WatchSource:0}: Error finding container 2e651fce81567ae24aa91d7eedf0d4f450b84cb58cdfeb7f6c7f2ec237dc939b: Status 404 returned error can't find the container with id 2e651fce81567ae24aa91d7eedf0d4f450b84cb58cdfeb7f6c7f2ec237dc939b
Jan 27 12:39:04 crc kubenswrapper[4900]: I0127 12:39:04.384397 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" event={"ID":"70cde964-7f6b-42e4-83f2-87e67664e70c","Type":"ContainerStarted","Data":"ffc7ab31cb8253efab04fc2ecdf7b617e3f6bd9a18cc9d87eae74d521c23a709"}
Jan 27 12:39:04 crc kubenswrapper[4900]: I0127 12:39:04.392402 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" event={"ID":"25cea2e4-2822-42b8-bd98-6a7f99e69c75","Type":"ContainerStarted","Data":"2e651fce81567ae24aa91d7eedf0d4f450b84cb58cdfeb7f6c7f2ec237dc939b"}
Jan 27 12:39:07 crc kubenswrapper[4900]: I0127 12:39:07.647574 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6m7fv"
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.597472 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-p424v" event={"ID":"6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990","Type":"ContainerStarted","Data":"bd2fa1c5668b32b508867d16c6cc617614bd2784254afc59ee8a854dd129c996"}
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.598525 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-p424v"
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.608779 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" event={"ID":"b925e9c1-ac78-41d5-a783-88a95ae66df6","Type":"ContainerStarted","Data":"d7dc38151b434afdb379ca85857acefe23fda5ff46f767e720e651bef695af25"}
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.608962 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw"
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.611707 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" event={"ID":"70cde964-7f6b-42e4-83f2-87e67664e70c","Type":"ContainerStarted","Data":"9841ded1d32ecaec27c1cc780c9853dadce018f80fe94a263c1bd545b0011901"}
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.613744 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" event={"ID":"61aa23bf-0ece-4bf6-a963-542bd8b399c6","Type":"ContainerStarted","Data":"c68ea90c47550f5c14a45e1e8f814690e09f79db62803f388e23180c85ce88d8"}
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.615589 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" event={"ID":"25cea2e4-2822-42b8-bd98-6a7f99e69c75","Type":"ContainerStarted","Data":"01cda646f5238aed456bef35351813926d8360109627893f470ac0533125ecc0"}
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.624510 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-p424v" podStartSLOduration=18.070402336 podStartE2EDuration="32.624481784s" podCreationTimestamp="2026-01-27 12:38:44 +0000 UTC" firstStartedPulling="2026-01-27 12:39:00.695267655 +0000 UTC m=+767.932295865" lastFinishedPulling="2026-01-27 12:39:15.249347103 +0000 UTC m=+782.486375313" observedRunningTime="2026-01-27 12:39:16.621013924 +0000 UTC m=+783.858042154" watchObservedRunningTime="2026-01-27 12:39:16.624481784 +0000 UTC m=+783.861509994"
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.647820 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" podStartSLOduration=17.273250085 podStartE2EDuration="32.647791532s" podCreationTimestamp="2026-01-27 12:38:44 +0000 UTC" firstStartedPulling="2026-01-27 12:38:59.837402264 +0000 UTC m=+767.074430474" lastFinishedPulling="2026-01-27 12:39:15.211943711 +0000 UTC m=+782.448971921" observedRunningTime="2026-01-27 12:39:16.643910121 +0000 UTC m=+783.880938341" watchObservedRunningTime="2026-01-27 12:39:16.647791532 +0000 UTC m=+783.884819742"
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.669386 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-sn6js" podStartSLOduration=20.828177123 podStartE2EDuration="32.66935552s" podCreationTimestamp="2026-01-27 12:38:44 +0000 UTC" firstStartedPulling="2026-01-27 12:39:04.063208824 +0000 UTC m=+771.300237034" lastFinishedPulling="2026-01-27 12:39:15.904387221 +0000 UTC m=+783.141415431" observedRunningTime="2026-01-27 12:39:16.659527839 +0000 UTC m=+783.896556049" watchObservedRunningTime="2026-01-27 12:39:16.66935552 +0000 UTC m=+783.906383730"
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.679795 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5f87745447-mhmkk" podStartSLOduration=21.14199797 podStartE2EDuration="32.679750558s" podCreationTimestamp="2026-01-27 12:38:44 +0000 UTC" firstStartedPulling="2026-01-27 12:39:04.085169594 +0000 UTC m=+771.322197804" lastFinishedPulling="2026-01-27 12:39:15.622922182 +0000 UTC m=+782.859950392" observedRunningTime="2026-01-27 12:39:16.675584099 +0000 UTC m=+783.912612329" watchObservedRunningTime="2026-01-27 12:39:16.679750558 +0000 UTC m=+783.916778768"
Jan 27 12:39:16 crc kubenswrapper[4900]: I0127 12:39:16.715278 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-76fmn" podStartSLOduration=17.597534851 podStartE2EDuration="32.715256226s" podCreationTimestamp="2026-01-27 12:38:44 +0000 UTC" firstStartedPulling="2026-01-27 12:39:00.095623176 +0000 UTC m=+767.332651386" lastFinishedPulling="2026-01-27 12:39:15.213344551 +0000 UTC m=+782.450372761" observedRunningTime="2026-01-27 12:39:16.711409486 +0000 UTC m=+783.948437696" watchObservedRunningTime="2026-01-27 12:39:16.715256226 +0000 UTC m=+783.952284436"
Jan 27 12:39:17 crc kubenswrapper[4900]: I0127 12:39:17.640385 4900 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-p424v container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 12:39:17 crc kubenswrapper[4900]: I0127 12:39:17.640483 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-p424v" podUID="6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 12:39:18 crc kubenswrapper[4900]: I0127 12:39:18.123862 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-p424v"
Jan 27 12:39:22 crc kubenswrapper[4900]: I0127 12:39:22.372707 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:39:22 crc kubenswrapper[4900]: I0127 12:39:22.373406 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:39:25 crc kubenswrapper[4900]: I0127 12:39:25.063618 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.404471 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-zsgft"]
Jan 27 12:39:27 crc kubenswrapper[4900]: E0127 12:39:27.405308 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="registry-server"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.405325 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="registry-server"
Jan 27 12:39:27 crc kubenswrapper[4900]: E0127 12:39:27.405337 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="extract-content"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.405343 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="extract-content"
Jan 27 12:39:27 crc kubenswrapper[4900]: E0127 12:39:27.405357 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="extract-utilities"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.405365 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="extract-utilities"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.405496 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae12c605-62a3-49ee-86ad-21a6566672f7" containerName="registry-server"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.405959 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-zsgft"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.411869 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.411925 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.412146 4900 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-gs4kf"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.420569 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-zsgft"]
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.479283 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-vpcz2"]
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.480786 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-vpcz2"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.483779 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx8xc\" (UniqueName: \"kubernetes.io/projected/10917301-3371-4ce8-9095-dd86dd3d8d70-kube-api-access-fx8xc\") pod \"cert-manager-cainjector-cf98fcc89-zsgft\" (UID: \"10917301-3371-4ce8-9095-dd86dd3d8d70\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-zsgft"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.488582 4900 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-rdz88"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.510145 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-vpcz2"]
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.548150 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4f2kx"]
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.549454 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.556674 4900 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-22n4s"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.570747 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4f2kx"]
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.586417 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx8xc\" (UniqueName: \"kubernetes.io/projected/10917301-3371-4ce8-9095-dd86dd3d8d70-kube-api-access-fx8xc\") pod \"cert-manager-cainjector-cf98fcc89-zsgft\" (UID: \"10917301-3371-4ce8-9095-dd86dd3d8d70\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-zsgft"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.586513 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5fmc\" (UniqueName: \"kubernetes.io/projected/149bd65b-2a7d-4d05-9dfc-6214043f664e-kube-api-access-g5fmc\") pod \"cert-manager-858654f9db-vpcz2\" (UID: \"149bd65b-2a7d-4d05-9dfc-6214043f664e\") " pod="cert-manager/cert-manager-858654f9db-vpcz2"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.586602 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfgvm\" (UniqueName: \"kubernetes.io/projected/c35e074b-0e8e-4d1f-8d2c-5c23cf320f25-kube-api-access-qfgvm\") pod \"cert-manager-webhook-687f57d79b-4f2kx\" (UID: \"c35e074b-0e8e-4d1f-8d2c-5c23cf320f25\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.622788 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx8xc\" (UniqueName: \"kubernetes.io/projected/10917301-3371-4ce8-9095-dd86dd3d8d70-kube-api-access-fx8xc\") pod \"cert-manager-cainjector-cf98fcc89-zsgft\" (UID: \"10917301-3371-4ce8-9095-dd86dd3d8d70\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-zsgft"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.687664 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfgvm\" (UniqueName: \"kubernetes.io/projected/c35e074b-0e8e-4d1f-8d2c-5c23cf320f25-kube-api-access-qfgvm\") pod \"cert-manager-webhook-687f57d79b-4f2kx\" (UID: \"c35e074b-0e8e-4d1f-8d2c-5c23cf320f25\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.687814 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5fmc\" (UniqueName: \"kubernetes.io/projected/149bd65b-2a7d-4d05-9dfc-6214043f664e-kube-api-access-g5fmc\") pod \"cert-manager-858654f9db-vpcz2\" (UID: \"149bd65b-2a7d-4d05-9dfc-6214043f664e\") " pod="cert-manager/cert-manager-858654f9db-vpcz2"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.718274 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5fmc\" (UniqueName: \"kubernetes.io/projected/149bd65b-2a7d-4d05-9dfc-6214043f664e-kube-api-access-g5fmc\") pod \"cert-manager-858654f9db-vpcz2\" (UID: \"149bd65b-2a7d-4d05-9dfc-6214043f664e\") " pod="cert-manager/cert-manager-858654f9db-vpcz2"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.718336 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfgvm\" (UniqueName: \"kubernetes.io/projected/c35e074b-0e8e-4d1f-8d2c-5c23cf320f25-kube-api-access-qfgvm\") pod \"cert-manager-webhook-687f57d79b-4f2kx\" (UID: \"c35e074b-0e8e-4d1f-8d2c-5c23cf320f25\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.730769 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-zsgft"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.809616 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-vpcz2"
Jan 27 12:39:27 crc kubenswrapper[4900]: I0127 12:39:27.873730 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx"
Jan 27 12:39:28 crc kubenswrapper[4900]: I0127 12:39:28.807502 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4f2kx"]
Jan 27 12:39:28 crc kubenswrapper[4900]: W0127 12:39:28.815257 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc35e074b_0e8e_4d1f_8d2c_5c23cf320f25.slice/crio-71f5513ca327126de8bfebf4ae3b392f88c2d01562e24ab6f21b2c4351e7fd0b WatchSource:0}: Error finding container 71f5513ca327126de8bfebf4ae3b392f88c2d01562e24ab6f21b2c4351e7fd0b: Status 404 returned error can't find the container with id 71f5513ca327126de8bfebf4ae3b392f88c2d01562e24ab6f21b2c4351e7fd0b
Jan 27 12:39:29 crc kubenswrapper[4900]: I0127 12:39:29.000151 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-vpcz2"]
Jan 27 12:39:29 crc kubenswrapper[4900]: I0127 12:39:29.092051 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-zsgft"]
Jan 27 12:39:29 crc kubenswrapper[4900]: I0127 12:39:29.717651 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-zsgft" event={"ID":"10917301-3371-4ce8-9095-dd86dd3d8d70","Type":"ContainerStarted","Data":"2c462a09aba38fe605ce5a1ae858f915c9a829f5d4b1405ebad5e97dd219a8bb"}
Jan 27 12:39:29 crc kubenswrapper[4900]: I0127 12:39:29.719223 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-vpcz2" event={"ID":"149bd65b-2a7d-4d05-9dfc-6214043f664e","Type":"ContainerStarted","Data":"b1ea099e54af6222b90e1e33a0928146a65b9fd0191f41025188f5d9371591ee"}
Jan 27 12:39:29 crc kubenswrapper[4900]: I0127 12:39:29.721181 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx" event={"ID":"c35e074b-0e8e-4d1f-8d2c-5c23cf320f25","Type":"ContainerStarted","Data":"71f5513ca327126de8bfebf4ae3b392f88c2d01562e24ab6f21b2c4351e7fd0b"}
Jan 27 12:39:38 crc kubenswrapper[4900]: I0127 12:39:38.811088 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-vpcz2" event={"ID":"149bd65b-2a7d-4d05-9dfc-6214043f664e","Type":"ContainerStarted","Data":"3820de3d32775565fe8f450bc3d69acebf8e2d6c79e297529addf20c84f9525a"}
Jan 27 12:39:38 crc kubenswrapper[4900]: I0127 12:39:38.813634 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx" event={"ID":"c35e074b-0e8e-4d1f-8d2c-5c23cf320f25","Type":"ContainerStarted","Data":"6d8dce39ebea35c22dfa3ed6f116ff62f373140a010eda6aa75d36419dfea598"}
Jan 27 12:39:38 crc kubenswrapper[4900]: I0127 12:39:38.813805 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx"
Jan 27 12:39:38 crc kubenswrapper[4900]: I0127 12:39:38.815777 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-zsgft" event={"ID":"10917301-3371-4ce8-9095-dd86dd3d8d70","Type":"ContainerStarted","Data":"b3bc2ae0908e83f4eae6efba7f26e27340776ba08cd6f09ae7619ba7f352216a"}
Jan 27 12:39:38 crc kubenswrapper[4900]: I0127 12:39:38.848418 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-vpcz2" podStartSLOduration=3.271675666 podStartE2EDuration="11.848384842s" podCreationTimestamp="2026-01-27 12:39:27 +0000 UTC" firstStartedPulling="2026-01-27 12:39:29.047125642 +0000 UTC m=+796.284153852" lastFinishedPulling="2026-01-27 12:39:37.623834818 +0000 UTC m=+804.860863028" observedRunningTime="2026-01-27 12:39:38.845911621 +0000 UTC m=+806.082939841" watchObservedRunningTime="2026-01-27 12:39:38.848384842 +0000 UTC m=+806.085413062"
Jan 27 12:39:38 crc kubenswrapper[4900]: I0127 12:39:38.910912 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-zsgft" podStartSLOduration=3.431744794 podStartE2EDuration="11.910877683s" podCreationTimestamp="2026-01-27 12:39:27 +0000 UTC" firstStartedPulling="2026-01-27 12:39:29.068361 +0000 UTC m=+796.305389210" lastFinishedPulling="2026-01-27 12:39:37.547493889 +0000 UTC m=+804.784522099" observedRunningTime="2026-01-27 12:39:38.876781466 +0000 UTC m=+806.113809676" watchObservedRunningTime="2026-01-27 12:39:38.910877683 +0000 UTC m=+806.147905893"
Jan 27 12:39:42 crc kubenswrapper[4900]: I0127 12:39:42.885175 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx"
Jan 27 12:39:42 crc kubenswrapper[4900]: I0127 12:39:42.909193 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx" podStartSLOduration=7.180289913 podStartE2EDuration="15.909166041s" podCreationTimestamp="2026-01-27 12:39:27 +0000 UTC" firstStartedPulling="2026-01-27 12:39:28.818137867 +0000 UTC m=+796.055166077" lastFinishedPulling="2026-01-27 12:39:37.547013995 +0000 UTC m=+804.784042205" observedRunningTime="2026-01-27 12:39:38.919556642 +0000 UTC m=+806.156584872" watchObservedRunningTime="2026-01-27 12:39:42.909166041 +0000 UTC m=+810.146194251"
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.372569 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.373582 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.373686 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x"
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.374864 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"13cb1cf2412c834432c235dfcd87c0da123068bd752d1f31f0e730cd7a97b24e"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.374949 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://13cb1cf2412c834432c235dfcd87c0da123068bd752d1f31f0e730cd7a97b24e" gracePeriod=600
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.946419 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="13cb1cf2412c834432c235dfcd87c0da123068bd752d1f31f0e730cd7a97b24e" exitCode=0
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.946482 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"13cb1cf2412c834432c235dfcd87c0da123068bd752d1f31f0e730cd7a97b24e"}
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.946558 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"3985ee48c72db877d5168b811e894771c936712a44e12eed08928d84a04baff9"}
Jan 27 12:39:52 crc kubenswrapper[4900]: I0127 12:39:52.946594 4900 scope.go:117] "RemoveContainer" containerID="69e7e31d6ba683ecd2f02cfbc9899542c457d918143ad827391c0c3e681ae608"
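Note: the sequence above is the kubelet's standard liveness-failure response: GET http://127.0.0.1:8798/health was refused at 12:38:52, 12:39:22 and 12:39:52 (consistent with a 30s probe period and a failure threshold of 3), after which container 13cb1cf2... is killed with gracePeriod=600 and replaced by 3985ee48.... A minimal sketch of such an HTTP liveness check (illustrative, not kubelet's actual prober):

// http_probe.go -- minimal sketch (illustrative, not kubelet's actual prober)
// of the HTTP liveness check failing above: a GET against
// http://127.0.0.1:8798/health where "connection refused" counts as a failure.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: time.Second} // probes must fail fast
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:8798/health"); err != nil {
		// After repeated failures the kubelet kills and restarts the container.
		fmt.Println("Probe failed:", err)
		return
	}
	fmt.Println("healthy")
}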
Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.309678 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.326022 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p"] Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.478764 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.478910 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnv6f\" (UniqueName: \"kubernetes.io/projected/fec10fd5-c022-46d6-bc89-53e69e2c0b40-kube-api-access-mnv6f\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.479164 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.581024 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnv6f\" (UniqueName: \"kubernetes.io/projected/fec10fd5-c022-46d6-bc89-53e69e2c0b40-kube-api-access-mnv6f\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.581162 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.581212 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.582108 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.582916 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.604760 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnv6f\" (UniqueName: \"kubernetes.io/projected/fec10fd5-c022-46d6-bc89-53e69e2c0b40-kube-api-access-mnv6f\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.642628 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.707210 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x"] Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.708773 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.726299 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x"] Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.887912 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.888349 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.888377 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ht2sz\" (UniqueName: \"kubernetes.io/projected/71212718-968f-4f4c-84e7-83c0e34b6597-kube-api-access-ht2sz\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:11 
crc kubenswrapper[4900]: I0127 12:40:11.971385 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p"] Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.990184 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.990236 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.990263 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ht2sz\" (UniqueName: \"kubernetes.io/projected/71212718-968f-4f4c-84e7-83c0e34b6597-kube-api-access-ht2sz\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.990796 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:11 crc kubenswrapper[4900]: I0127 12:40:11.990975 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:12 crc kubenswrapper[4900]: I0127 12:40:12.014414 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ht2sz\" (UniqueName: \"kubernetes.io/projected/71212718-968f-4f4c-84e7-83c0e34b6597-kube-api-access-ht2sz\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:12 crc kubenswrapper[4900]: I0127 12:40:12.038953 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:12 crc kubenswrapper[4900]: I0127 12:40:12.094357 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" event={"ID":"fec10fd5-c022-46d6-bc89-53e69e2c0b40","Type":"ContainerStarted","Data":"ce5a460cf57ace17ab8833402c8dcac242c6c649b932b8a3653ec5b39f3f8344"} Jan 27 12:40:12 crc kubenswrapper[4900]: W0127 12:40:12.562346 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71212718_968f_4f4c_84e7_83c0e34b6597.slice/crio-ae82a318246ba1a279f25b72d1ab00316a2ecddfde391e408da2cb43ba06c41a WatchSource:0}: Error finding container ae82a318246ba1a279f25b72d1ab00316a2ecddfde391e408da2cb43ba06c41a: Status 404 returned error can't find the container with id ae82a318246ba1a279f25b72d1ab00316a2ecddfde391e408da2cb43ba06c41a Jan 27 12:40:12 crc kubenswrapper[4900]: I0127 12:40:12.564364 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x"] Jan 27 12:40:13 crc kubenswrapper[4900]: I0127 12:40:13.103868 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" event={"ID":"71212718-968f-4f4c-84e7-83c0e34b6597","Type":"ContainerStarted","Data":"925771a01389aeb53b6ebefb6e30acd7b8bbee8107716b7af6b143626d4dde66"} Jan 27 12:40:13 crc kubenswrapper[4900]: I0127 12:40:13.104331 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" event={"ID":"71212718-968f-4f4c-84e7-83c0e34b6597","Type":"ContainerStarted","Data":"ae82a318246ba1a279f25b72d1ab00316a2ecddfde391e408da2cb43ba06c41a"} Jan 27 12:40:13 crc kubenswrapper[4900]: I0127 12:40:13.105656 4900 generic.go:334] "Generic (PLEG): container finished" podID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerID="65ca511442693001f52da115c210413aba7744ea45db3f907a6100b67b63065c" exitCode=0 Jan 27 12:40:13 crc kubenswrapper[4900]: I0127 12:40:13.105726 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" event={"ID":"fec10fd5-c022-46d6-bc89-53e69e2c0b40","Type":"ContainerDied","Data":"65ca511442693001f52da115c210413aba7744ea45db3f907a6100b67b63065c"} Jan 27 12:40:14 crc kubenswrapper[4900]: I0127 12:40:14.141938 4900 generic.go:334] "Generic (PLEG): container finished" podID="71212718-968f-4f4c-84e7-83c0e34b6597" containerID="925771a01389aeb53b6ebefb6e30acd7b8bbee8107716b7af6b143626d4dde66" exitCode=0 Jan 27 12:40:14 crc kubenswrapper[4900]: I0127 12:40:14.142003 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" event={"ID":"71212718-968f-4f4c-84e7-83c0e34b6597","Type":"ContainerDied","Data":"925771a01389aeb53b6ebefb6e30acd7b8bbee8107716b7af6b143626d4dde66"} Jan 27 12:40:15 crc kubenswrapper[4900]: I0127 12:40:15.163961 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" event={"ID":"fec10fd5-c022-46d6-bc89-53e69e2c0b40","Type":"ContainerStarted","Data":"b348c2405ff7badea527822bf3e94340eb91889d81d2362373a71bc11adad816"} Jan 27 
12:40:16 crc kubenswrapper[4900]: I0127 12:40:16.542564 4900 generic.go:334] "Generic (PLEG): container finished" podID="71212718-968f-4f4c-84e7-83c0e34b6597" containerID="509f223895d33aa8dc20ede8c61fdf9014e252708b480f6178e12bee76c2801a" exitCode=0 Jan 27 12:40:16 crc kubenswrapper[4900]: I0127 12:40:16.543139 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" event={"ID":"71212718-968f-4f4c-84e7-83c0e34b6597","Type":"ContainerDied","Data":"509f223895d33aa8dc20ede8c61fdf9014e252708b480f6178e12bee76c2801a"} Jan 27 12:40:16 crc kubenswrapper[4900]: I0127 12:40:16.551650 4900 generic.go:334] "Generic (PLEG): container finished" podID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerID="b348c2405ff7badea527822bf3e94340eb91889d81d2362373a71bc11adad816" exitCode=0 Jan 27 12:40:16 crc kubenswrapper[4900]: I0127 12:40:16.551758 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" event={"ID":"fec10fd5-c022-46d6-bc89-53e69e2c0b40","Type":"ContainerDied","Data":"b348c2405ff7badea527822bf3e94340eb91889d81d2362373a71bc11adad816"} Jan 27 12:40:17 crc kubenswrapper[4900]: I0127 12:40:17.561567 4900 generic.go:334] "Generic (PLEG): container finished" podID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerID="58bc753149a19ccec60f5347598bf859853a51629f4731d7d81f224f7af9d929" exitCode=0 Jan 27 12:40:17 crc kubenswrapper[4900]: I0127 12:40:17.561946 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" event={"ID":"fec10fd5-c022-46d6-bc89-53e69e2c0b40","Type":"ContainerDied","Data":"58bc753149a19ccec60f5347598bf859853a51629f4731d7d81f224f7af9d929"} Jan 27 12:40:17 crc kubenswrapper[4900]: I0127 12:40:17.567907 4900 generic.go:334] "Generic (PLEG): container finished" podID="71212718-968f-4f4c-84e7-83c0e34b6597" containerID="d84ac998aad8759f752bb67217e0de1265e822d1bc23246fee2575c6c5b916c4" exitCode=0 Jan 27 12:40:17 crc kubenswrapper[4900]: I0127 12:40:17.567988 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" event={"ID":"71212718-968f-4f4c-84e7-83c0e34b6597","Type":"ContainerDied","Data":"d84ac998aad8759f752bb67217e0de1265e822d1bc23246fee2575c6c5b916c4"} Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.099432 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.164011 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.218366 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-util\") pod \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.218482 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-bundle\") pod \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.218583 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnv6f\" (UniqueName: \"kubernetes.io/projected/fec10fd5-c022-46d6-bc89-53e69e2c0b40-kube-api-access-mnv6f\") pod \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\" (UID: \"fec10fd5-c022-46d6-bc89-53e69e2c0b40\") " Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.220919 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-bundle" (OuterVolumeSpecName: "bundle") pod "fec10fd5-c022-46d6-bc89-53e69e2c0b40" (UID: "fec10fd5-c022-46d6-bc89-53e69e2c0b40"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.229202 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fec10fd5-c022-46d6-bc89-53e69e2c0b40-kube-api-access-mnv6f" (OuterVolumeSpecName: "kube-api-access-mnv6f") pod "fec10fd5-c022-46d6-bc89-53e69e2c0b40" (UID: "fec10fd5-c022-46d6-bc89-53e69e2c0b40"). InnerVolumeSpecName "kube-api-access-mnv6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.246193 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-util" (OuterVolumeSpecName: "util") pod "fec10fd5-c022-46d6-bc89-53e69e2c0b40" (UID: "fec10fd5-c022-46d6-bc89-53e69e2c0b40"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.320737 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ht2sz\" (UniqueName: \"kubernetes.io/projected/71212718-968f-4f4c-84e7-83c0e34b6597-kube-api-access-ht2sz\") pod \"71212718-968f-4f4c-84e7-83c0e34b6597\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.320824 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-util\") pod \"71212718-968f-4f4c-84e7-83c0e34b6597\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.320886 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-bundle\") pod \"71212718-968f-4f4c-84e7-83c0e34b6597\" (UID: \"71212718-968f-4f4c-84e7-83c0e34b6597\") " Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.321345 4900 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-util\") on node \"crc\" DevicePath \"\"" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.321363 4900 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fec10fd5-c022-46d6-bc89-53e69e2c0b40-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.321373 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnv6f\" (UniqueName: \"kubernetes.io/projected/fec10fd5-c022-46d6-bc89-53e69e2c0b40-kube-api-access-mnv6f\") on node \"crc\" DevicePath \"\"" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.322314 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-bundle" (OuterVolumeSpecName: "bundle") pod "71212718-968f-4f4c-84e7-83c0e34b6597" (UID: "71212718-968f-4f4c-84e7-83c0e34b6597"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.324743 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71212718-968f-4f4c-84e7-83c0e34b6597-kube-api-access-ht2sz" (OuterVolumeSpecName: "kube-api-access-ht2sz") pod "71212718-968f-4f4c-84e7-83c0e34b6597" (UID: "71212718-968f-4f4c-84e7-83c0e34b6597"). InnerVolumeSpecName "kube-api-access-ht2sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.331804 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-util" (OuterVolumeSpecName: "util") pod "71212718-968f-4f4c-84e7-83c0e34b6597" (UID: "71212718-968f-4f4c-84e7-83c0e34b6597"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.423227 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ht2sz\" (UniqueName: \"kubernetes.io/projected/71212718-968f-4f4c-84e7-83c0e34b6597-kube-api-access-ht2sz\") on node \"crc\" DevicePath \"\"" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.423275 4900 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-util\") on node \"crc\" DevicePath \"\"" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.423286 4900 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/71212718-968f-4f4c-84e7-83c0e34b6597-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.589509 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" event={"ID":"71212718-968f-4f4c-84e7-83c0e34b6597","Type":"ContainerDied","Data":"ae82a318246ba1a279f25b72d1ab00316a2ecddfde391e408da2cb43ba06c41a"} Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.590035 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae82a318246ba1a279f25b72d1ab00316a2ecddfde391e408da2cb43ba06c41a" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.589587 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.592665 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.592658 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p" event={"ID":"fec10fd5-c022-46d6-bc89-53e69e2c0b40","Type":"ContainerDied","Data":"ce5a460cf57ace17ab8833402c8dcac242c6c649b932b8a3653ec5b39f3f8344"} Jan 27 12:40:19 crc kubenswrapper[4900]: I0127 12:40:19.592755 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce5a460cf57ace17ab8833402c8dcac242c6c649b932b8a3653ec5b39f3f8344" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.068895 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt"] Jan 27 12:40:27 crc kubenswrapper[4900]: E0127 12:40:27.071612 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerName="util" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.071722 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerName="util" Jan 27 12:40:27 crc kubenswrapper[4900]: E0127 12:40:27.071813 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerName="pull" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.071931 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerName="pull" Jan 27 12:40:27 crc kubenswrapper[4900]: E0127 12:40:27.072005 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerName="extract" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.072074 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerName="extract" Jan 27 12:40:27 crc kubenswrapper[4900]: E0127 12:40:27.072136 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71212718-968f-4f4c-84e7-83c0e34b6597" containerName="pull" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.072200 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="71212718-968f-4f4c-84e7-83c0e34b6597" containerName="pull" Jan 27 12:40:27 crc kubenswrapper[4900]: E0127 12:40:27.072265 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71212718-968f-4f4c-84e7-83c0e34b6597" containerName="extract" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.072327 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="71212718-968f-4f4c-84e7-83c0e34b6597" containerName="extract" Jan 27 12:40:27 crc kubenswrapper[4900]: E0127 12:40:27.072396 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71212718-968f-4f4c-84e7-83c0e34b6597" containerName="util" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.072472 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="71212718-968f-4f4c-84e7-83c0e34b6597" containerName="util" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.072739 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="71212718-968f-4f4c-84e7-83c0e34b6597" containerName="extract" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.072838 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="fec10fd5-c022-46d6-bc89-53e69e2c0b40" containerName="extract" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.073976 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.077882 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-fwpf6" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.078007 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.078954 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.079191 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.079372 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.079638 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.090922 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt"] Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.159718 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-apiservice-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.159812 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/386bc10f-9e5d-49d0-9906-e97f1796d49d-manager-config\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.159889 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-webhook-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.159909 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s94wt\" (UniqueName: \"kubernetes.io/projected/386bc10f-9e5d-49d0-9906-e97f1796d49d-kube-api-access-s94wt\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.159935 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.261265 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-webhook-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.261343 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s94wt\" (UniqueName: \"kubernetes.io/projected/386bc10f-9e5d-49d0-9906-e97f1796d49d-kube-api-access-s94wt\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.261381 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.261439 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-apiservice-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.261501 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/386bc10f-9e5d-49d0-9906-e97f1796d49d-manager-config\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.262730 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/386bc10f-9e5d-49d0-9906-e97f1796d49d-manager-config\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.276217 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-apiservice-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.276217 4900 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-webhook-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.293894 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/386bc10f-9e5d-49d0-9906-e97f1796d49d-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.300801 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s94wt\" (UniqueName: \"kubernetes.io/projected/386bc10f-9e5d-49d0-9906-e97f1796d49d-kube-api-access-s94wt\") pod \"loki-operator-controller-manager-849c99c676-jbpgt\" (UID: \"386bc10f-9e5d-49d0-9906-e97f1796d49d\") " pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:27 crc kubenswrapper[4900]: I0127 12:40:27.393194 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:28 crc kubenswrapper[4900]: I0127 12:40:28.082049 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt"] Jan 27 12:40:28 crc kubenswrapper[4900]: I0127 12:40:28.690759 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" event={"ID":"386bc10f-9e5d-49d0-9906-e97f1796d49d","Type":"ContainerStarted","Data":"f832bf8fa4d66fde999f6b373bfb38ecf367f4e5b37f39393c73e42debed22e6"} Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.220903 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n"] Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.229865 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.233220 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-6dmqp" Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.233812 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.235694 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.253968 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n"] Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.382096 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5rjk\" (UniqueName: \"kubernetes.io/projected/f00a0f13-11d5-4ee7-9276-21722a3ce14f-kube-api-access-m5rjk\") pod \"cluster-logging-operator-79cf69ddc8-kx46n\" (UID: \"f00a0f13-11d5-4ee7-9276-21722a3ce14f\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.485876 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5rjk\" (UniqueName: \"kubernetes.io/projected/f00a0f13-11d5-4ee7-9276-21722a3ce14f-kube-api-access-m5rjk\") pod \"cluster-logging-operator-79cf69ddc8-kx46n\" (UID: \"f00a0f13-11d5-4ee7-9276-21722a3ce14f\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.526885 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5rjk\" (UniqueName: \"kubernetes.io/projected/f00a0f13-11d5-4ee7-9276-21722a3ce14f-kube-api-access-m5rjk\") pod \"cluster-logging-operator-79cf69ddc8-kx46n\" (UID: \"f00a0f13-11d5-4ee7-9276-21722a3ce14f\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" Jan 27 12:40:33 crc kubenswrapper[4900]: I0127 12:40:33.554229 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" Jan 27 12:40:35 crc kubenswrapper[4900]: I0127 12:40:35.531039 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n"] Jan 27 12:40:35 crc kubenswrapper[4900]: W0127 12:40:35.532710 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf00a0f13_11d5_4ee7_9276_21722a3ce14f.slice/crio-c14b8ea21ad29c175758693d19bb050b72841c79433f291e61b4bf275417ffbb WatchSource:0}: Error finding container c14b8ea21ad29c175758693d19bb050b72841c79433f291e61b4bf275417ffbb: Status 404 returned error can't find the container with id c14b8ea21ad29c175758693d19bb050b72841c79433f291e61b4bf275417ffbb Jan 27 12:40:35 crc kubenswrapper[4900]: I0127 12:40:35.839506 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" event={"ID":"f00a0f13-11d5-4ee7-9276-21722a3ce14f","Type":"ContainerStarted","Data":"c14b8ea21ad29c175758693d19bb050b72841c79433f291e61b4bf275417ffbb"} Jan 27 12:40:35 crc kubenswrapper[4900]: I0127 12:40:35.843256 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" event={"ID":"386bc10f-9e5d-49d0-9906-e97f1796d49d","Type":"ContainerStarted","Data":"0a7c33b86ae959b3374d7b15c16ef929c9d99e1062e990b704c79f33f919c418"} Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.133440 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vgbqt"] Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.137477 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.207911 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgbqt"] Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.418763 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-catalog-content\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.418908 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-utilities\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.418945 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssgp9\" (UniqueName: \"kubernetes.io/projected/20f11429-1efb-4c02-ba06-8905873cdf48-kube-api-access-ssgp9\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.520818 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-utilities\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.520917 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssgp9\" (UniqueName: \"kubernetes.io/projected/20f11429-1efb-4c02-ba06-8905873cdf48-kube-api-access-ssgp9\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.521005 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-catalog-content\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.521659 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-catalog-content\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.522889 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-utilities\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.562856 4900 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ssgp9\" (UniqueName: \"kubernetes.io/projected/20f11429-1efb-4c02-ba06-8905873cdf48-kube-api-access-ssgp9\") pod \"redhat-marketplace-vgbqt\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:49 crc kubenswrapper[4900]: I0127 12:40:49.812628 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:51 crc kubenswrapper[4900]: E0127 12:40:51.139820 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:80cac88d8ff5b40036e5983f5dacfc08702afe9c7a66b48d1c88bcb149c285b3" Jan 27 12:40:51 crc kubenswrapper[4900]: E0127 12:40:51.140525 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cluster-logging-operator,Image:registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:80cac88d8ff5b40036e5983f5dacfc08702afe9c7a66b48d1c88bcb149c285b3,Command:[cluster-logging-operator],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:WATCH_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.annotations['olm.targetNamespaces'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:OPERATOR_NAME,Value:cluster-logging-operator,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_VECTOR,Value:registry.redhat.io/openshift-logging/vector-rhel9@sha256:fa2cfa2ed336ce105c8dea5bfe0825407e37ef296193ae162f515213fe43c8d5,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_LOG_FILE_METRIC_EXPORTER,Value:registry.redhat.io/openshift-logging/log-file-metric-exporter-rhel9@sha256:0d2edaf37f5e25155f9a3086e81d40686b102a78c3ae35b07e0c5992d3a7fb40,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-logging.v6.2.7,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m5rjk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000690000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cluster-logging-operator-79cf69ddc8-kx46n_openshift-logging(f00a0f13-11d5-4ee7-9276-21722a3ce14f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 27 12:40:51 crc kubenswrapper[4900]: E0127 12:40:51.141881 4900 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-logging-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" podUID="f00a0f13-11d5-4ee7-9276-21722a3ce14f" Jan 27 12:40:51 crc kubenswrapper[4900]: E0127 12:40:51.307383 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-logging-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:80cac88d8ff5b40036e5983f5dacfc08702afe9c7a66b48d1c88bcb149c285b3\\\"\"" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" podUID="f00a0f13-11d5-4ee7-9276-21722a3ce14f" Jan 27 12:40:51 crc kubenswrapper[4900]: I0127 12:40:51.484326 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgbqt"] Jan 27 12:40:52 crc kubenswrapper[4900]: I0127 12:40:52.315999 4900 generic.go:334] "Generic (PLEG): container finished" podID="20f11429-1efb-4c02-ba06-8905873cdf48" containerID="29d66c332cca025f78b75851a7f2870c1707c77ceaf312826ec81c66ecb42eaa" exitCode=0 Jan 27 12:40:52 crc kubenswrapper[4900]: I0127 12:40:52.316251 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgbqt" event={"ID":"20f11429-1efb-4c02-ba06-8905873cdf48","Type":"ContainerDied","Data":"29d66c332cca025f78b75851a7f2870c1707c77ceaf312826ec81c66ecb42eaa"} Jan 27 12:40:52 crc kubenswrapper[4900]: I0127 12:40:52.317430 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgbqt" event={"ID":"20f11429-1efb-4c02-ba06-8905873cdf48","Type":"ContainerStarted","Data":"efdaf466925230e3d1615b5add340bded69f340f97c78e107bcdc2cd0c692473"} Jan 27 12:40:52 crc kubenswrapper[4900]: I0127 12:40:52.320338 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" event={"ID":"386bc10f-9e5d-49d0-9906-e97f1796d49d","Type":"ContainerStarted","Data":"b060a509466fc36681dc2cc1e893a23dab95b3da48bf892a365bddc72690b5bf"} Jan 27 12:40:52 crc kubenswrapper[4900]: I0127 12:40:52.321717 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:52 crc kubenswrapper[4900]: I0127 12:40:52.325490 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" Jan 27 12:40:52 crc kubenswrapper[4900]: I0127 12:40:52.369215 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" podStartSLOduration=2.292334936 podStartE2EDuration="25.369184668s" podCreationTimestamp="2026-01-27 12:40:27 +0000 UTC" firstStartedPulling="2026-01-27 12:40:28.098199755 +0000 UTC m=+855.335227975" lastFinishedPulling="2026-01-27 12:40:51.175049497 +0000 UTC m=+878.412077707" observedRunningTime="2026-01-27 12:40:52.3637368 +0000 UTC m=+879.600765010" watchObservedRunningTime="2026-01-27 12:40:52.369184668 +0000 UTC m=+879.606212878" Jan 27 12:40:53 crc kubenswrapper[4900]: I0127 12:40:53.332906 4900 generic.go:334] "Generic (PLEG): container finished" podID="20f11429-1efb-4c02-ba06-8905873cdf48" 
containerID="1e4108314e08ba9055af58b475d682cd794f8a54ce919757e9a1cc7304d2c927" exitCode=0 Jan 27 12:40:53 crc kubenswrapper[4900]: I0127 12:40:53.332985 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgbqt" event={"ID":"20f11429-1efb-4c02-ba06-8905873cdf48","Type":"ContainerDied","Data":"1e4108314e08ba9055af58b475d682cd794f8a54ce919757e9a1cc7304d2c927"} Jan 27 12:40:54 crc kubenswrapper[4900]: I0127 12:40:54.348863 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgbqt" event={"ID":"20f11429-1efb-4c02-ba06-8905873cdf48","Type":"ContainerStarted","Data":"8f699d2f082579348362a50a0094c409f6179af38e0b2dace5e433ba0f32f1fd"} Jan 27 12:40:59 crc kubenswrapper[4900]: I0127 12:40:59.813419 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:59 crc kubenswrapper[4900]: I0127 12:40:59.814309 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:59 crc kubenswrapper[4900]: I0127 12:40:59.869428 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:40:59 crc kubenswrapper[4900]: I0127 12:40:59.895041 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vgbqt" podStartSLOduration=9.464476961999999 podStartE2EDuration="10.89500527s" podCreationTimestamp="2026-01-27 12:40:49 +0000 UTC" firstStartedPulling="2026-01-27 12:40:52.318148854 +0000 UTC m=+879.555177074" lastFinishedPulling="2026-01-27 12:40:53.748677172 +0000 UTC m=+880.985705382" observedRunningTime="2026-01-27 12:40:54.390108678 +0000 UTC m=+881.627136888" watchObservedRunningTime="2026-01-27 12:40:59.89500527 +0000 UTC m=+887.132033480" Jan 27 12:41:00 crc kubenswrapper[4900]: I0127 12:41:00.463089 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:41:00 crc kubenswrapper[4900]: I0127 12:41:00.517318 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgbqt"] Jan 27 12:41:02 crc kubenswrapper[4900]: I0127 12:41:02.409154 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vgbqt" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" containerName="registry-server" containerID="cri-o://8f699d2f082579348362a50a0094c409f6179af38e0b2dace5e433ba0f32f1fd" gracePeriod=2 Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.429272 4900 generic.go:334] "Generic (PLEG): container finished" podID="20f11429-1efb-4c02-ba06-8905873cdf48" containerID="8f699d2f082579348362a50a0094c409f6179af38e0b2dace5e433ba0f32f1fd" exitCode=0 Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.429356 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgbqt" event={"ID":"20f11429-1efb-4c02-ba06-8905873cdf48","Type":"ContainerDied","Data":"8f699d2f082579348362a50a0094c409f6179af38e0b2dace5e433ba0f32f1fd"} Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.469878 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.637945 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-catalog-content\") pod \"20f11429-1efb-4c02-ba06-8905873cdf48\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.638025 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-utilities\") pod \"20f11429-1efb-4c02-ba06-8905873cdf48\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.638096 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssgp9\" (UniqueName: \"kubernetes.io/projected/20f11429-1efb-4c02-ba06-8905873cdf48-kube-api-access-ssgp9\") pod \"20f11429-1efb-4c02-ba06-8905873cdf48\" (UID: \"20f11429-1efb-4c02-ba06-8905873cdf48\") " Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.642868 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-utilities" (OuterVolumeSpecName: "utilities") pod "20f11429-1efb-4c02-ba06-8905873cdf48" (UID: "20f11429-1efb-4c02-ba06-8905873cdf48"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.644560 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20f11429-1efb-4c02-ba06-8905873cdf48-kube-api-access-ssgp9" (OuterVolumeSpecName: "kube-api-access-ssgp9") pod "20f11429-1efb-4c02-ba06-8905873cdf48" (UID: "20f11429-1efb-4c02-ba06-8905873cdf48"). InnerVolumeSpecName "kube-api-access-ssgp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.740858 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.740915 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssgp9\" (UniqueName: \"kubernetes.io/projected/20f11429-1efb-4c02-ba06-8905873cdf48-kube-api-access-ssgp9\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.771711 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20f11429-1efb-4c02-ba06-8905873cdf48" (UID: "20f11429-1efb-4c02-ba06-8905873cdf48"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:41:03 crc kubenswrapper[4900]: I0127 12:41:03.842795 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20f11429-1efb-4c02-ba06-8905873cdf48-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:04 crc kubenswrapper[4900]: I0127 12:41:04.439660 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgbqt" event={"ID":"20f11429-1efb-4c02-ba06-8905873cdf48","Type":"ContainerDied","Data":"efdaf466925230e3d1615b5add340bded69f340f97c78e107bcdc2cd0c692473"} Jan 27 12:41:04 crc kubenswrapper[4900]: I0127 12:41:04.439802 4900 scope.go:117] "RemoveContainer" containerID="8f699d2f082579348362a50a0094c409f6179af38e0b2dace5e433ba0f32f1fd" Jan 27 12:41:04 crc kubenswrapper[4900]: I0127 12:41:04.439795 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vgbqt" Jan 27 12:41:04 crc kubenswrapper[4900]: I0127 12:41:04.467201 4900 scope.go:117] "RemoveContainer" containerID="1e4108314e08ba9055af58b475d682cd794f8a54ce919757e9a1cc7304d2c927" Jan 27 12:41:04 crc kubenswrapper[4900]: I0127 12:41:04.472561 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgbqt"] Jan 27 12:41:04 crc kubenswrapper[4900]: I0127 12:41:04.478500 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgbqt"] Jan 27 12:41:04 crc kubenswrapper[4900]: I0127 12:41:04.485439 4900 scope.go:117] "RemoveContainer" containerID="29d66c332cca025f78b75851a7f2870c1707c77ceaf312826ec81c66ecb42eaa" Jan 27 12:41:04 crc kubenswrapper[4900]: I0127 12:41:04.499636 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" path="/var/lib/kubelet/pods/20f11429-1efb-4c02-ba06-8905873cdf48/volumes" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.296396 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vs74w"] Jan 27 12:41:06 crc kubenswrapper[4900]: E0127 12:41:06.297074 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" containerName="extract-utilities" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.297091 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" containerName="extract-utilities" Jan 27 12:41:06 crc kubenswrapper[4900]: E0127 12:41:06.297111 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" containerName="registry-server" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.297118 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" containerName="registry-server" Jan 27 12:41:06 crc kubenswrapper[4900]: E0127 12:41:06.297162 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" containerName="extract-content" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.297170 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" containerName="extract-content" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.297412 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="20f11429-1efb-4c02-ba06-8905873cdf48" containerName="registry-server" Jan 27 
12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.298829 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.312236 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vs74w"] Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.392751 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-utilities\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.392839 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-catalog-content\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.392886 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8l765\" (UniqueName: \"kubernetes.io/projected/a21684ab-2dff-4650-9633-4d442ce77752-kube-api-access-8l765\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.494319 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-utilities\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.494387 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-catalog-content\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.494423 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8l765\" (UniqueName: \"kubernetes.io/projected/a21684ab-2dff-4650-9633-4d442ce77752-kube-api-access-8l765\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.494903 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-catalog-content\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.495146 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-utilities\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " 
pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.519434 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8l765\" (UniqueName: \"kubernetes.io/projected/a21684ab-2dff-4650-9633-4d442ce77752-kube-api-access-8l765\") pod \"certified-operators-vs74w\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:06 crc kubenswrapper[4900]: I0127 12:41:06.617105 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:07 crc kubenswrapper[4900]: I0127 12:41:07.043224 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vs74w"] Jan 27 12:41:07 crc kubenswrapper[4900]: W0127 12:41:07.045536 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda21684ab_2dff_4650_9633_4d442ce77752.slice/crio-08f1682942eb9e22f52ccf984961b2ffcced841f79b5d53d6ca46fd3128a3474 WatchSource:0}: Error finding container 08f1682942eb9e22f52ccf984961b2ffcced841f79b5d53d6ca46fd3128a3474: Status 404 returned error can't find the container with id 08f1682942eb9e22f52ccf984961b2ffcced841f79b5d53d6ca46fd3128a3474 Jan 27 12:41:07 crc kubenswrapper[4900]: I0127 12:41:07.466896 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs74w" event={"ID":"a21684ab-2dff-4650-9633-4d442ce77752","Type":"ContainerStarted","Data":"08f1682942eb9e22f52ccf984961b2ffcced841f79b5d53d6ca46fd3128a3474"} Jan 27 12:41:09 crc kubenswrapper[4900]: I0127 12:41:09.485026 4900 generic.go:334] "Generic (PLEG): container finished" podID="a21684ab-2dff-4650-9633-4d442ce77752" containerID="4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0" exitCode=0 Jan 27 12:41:09 crc kubenswrapper[4900]: I0127 12:41:09.485098 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs74w" event={"ID":"a21684ab-2dff-4650-9633-4d442ce77752","Type":"ContainerDied","Data":"4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0"} Jan 27 12:41:10 crc kubenswrapper[4900]: I0127 12:41:10.494104 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" event={"ID":"f00a0f13-11d5-4ee7-9276-21722a3ce14f","Type":"ContainerStarted","Data":"6a93d35c01acd630875b30f2a557c21b5cac244d153e79f3e0d1f04a6d66dfdd"} Jan 27 12:41:10 crc kubenswrapper[4900]: I0127 12:41:10.533894 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-kx46n" podStartSLOduration=2.767581422 podStartE2EDuration="37.533862926s" podCreationTimestamp="2026-01-27 12:40:33 +0000 UTC" firstStartedPulling="2026-01-27 12:40:35.536181822 +0000 UTC m=+862.773210032" lastFinishedPulling="2026-01-27 12:41:10.302463326 +0000 UTC m=+897.539491536" observedRunningTime="2026-01-27 12:41:10.524136424 +0000 UTC m=+897.761164654" watchObservedRunningTime="2026-01-27 12:41:10.533862926 +0000 UTC m=+897.770891136" Jan 27 12:41:11 crc kubenswrapper[4900]: I0127 12:41:11.530648 4900 generic.go:334] "Generic (PLEG): container finished" podID="a21684ab-2dff-4650-9633-4d442ce77752" containerID="ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526" exitCode=0 Jan 27 12:41:11 crc kubenswrapper[4900]: I0127 
12:41:11.530867 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs74w" event={"ID":"a21684ab-2dff-4650-9633-4d442ce77752","Type":"ContainerDied","Data":"ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526"} Jan 27 12:41:12 crc kubenswrapper[4900]: I0127 12:41:12.541981 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs74w" event={"ID":"a21684ab-2dff-4650-9633-4d442ce77752","Type":"ContainerStarted","Data":"01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df"} Jan 27 12:41:12 crc kubenswrapper[4900]: I0127 12:41:12.564701 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vs74w" podStartSLOduration=4.09082951 podStartE2EDuration="6.564674644s" podCreationTimestamp="2026-01-27 12:41:06 +0000 UTC" firstStartedPulling="2026-01-27 12:41:09.487355568 +0000 UTC m=+896.724383778" lastFinishedPulling="2026-01-27 12:41:11.961200702 +0000 UTC m=+899.198228912" observedRunningTime="2026-01-27 12:41:12.561316526 +0000 UTC m=+899.798344736" watchObservedRunningTime="2026-01-27 12:41:12.564674644 +0000 UTC m=+899.801702854" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.607427 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.608851 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.624188 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.627467 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.627782 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.627945 4900 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-8f8gd" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.639607 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5czmb\" (UniqueName: \"kubernetes.io/projected/9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe-kube-api-access-5czmb\") pod \"minio\" (UID: \"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe\") " pod="minio-dev/minio" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.639993 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-95213397-7073-45f3-8216-ff3fd55b854a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-95213397-7073-45f3-8216-ff3fd55b854a\") pod \"minio\" (UID: \"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe\") " pod="minio-dev/minio" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.743672 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5czmb\" (UniqueName: \"kubernetes.io/projected/9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe-kube-api-access-5czmb\") pod \"minio\" (UID: \"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe\") " pod="minio-dev/minio" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.743774 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-95213397-7073-45f3-8216-ff3fd55b854a\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-95213397-7073-45f3-8216-ff3fd55b854a\") pod \"minio\" (UID: \"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe\") " pod="minio-dev/minio" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.751285 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.751389 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-95213397-7073-45f3-8216-ff3fd55b854a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-95213397-7073-45f3-8216-ff3fd55b854a\") pod \"minio\" (UID: \"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1bc2396f63935ee8ea77823da72f749d9e5b37c5b0943aa81204cc8bf88af528/globalmount\"" pod="minio-dev/minio" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.770269 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5czmb\" (UniqueName: \"kubernetes.io/projected/9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe-kube-api-access-5czmb\") pod \"minio\" (UID: \"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe\") " pod="minio-dev/minio" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.799922 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-95213397-7073-45f3-8216-ff3fd55b854a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-95213397-7073-45f3-8216-ff3fd55b854a\") pod \"minio\" (UID: \"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe\") " pod="minio-dev/minio" Jan 27 12:41:15 crc kubenswrapper[4900]: I0127 12:41:15.943618 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Jan 27 12:41:16 crc kubenswrapper[4900]: I0127 12:41:16.399406 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Jan 27 12:41:16 crc kubenswrapper[4900]: I0127 12:41:16.571724 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe","Type":"ContainerStarted","Data":"d5504c17ccfdebdb15ea6ac6383f250307075275c92aa1e6a9d2dc6ef2ffd5ff"} Jan 27 12:41:16 crc kubenswrapper[4900]: I0127 12:41:16.617768 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:16 crc kubenswrapper[4900]: I0127 12:41:16.619514 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:16 crc kubenswrapper[4900]: I0127 12:41:16.671685 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:17 crc kubenswrapper[4900]: I0127 12:41:17.628528 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:17 crc kubenswrapper[4900]: I0127 12:41:17.684205 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vs74w"] Jan 27 12:41:19 crc kubenswrapper[4900]: I0127 12:41:19.599830 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vs74w" podUID="a21684ab-2dff-4650-9633-4d442ce77752" containerName="registry-server" containerID="cri-o://01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df" 
gracePeriod=2 Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.063763 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.139225 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8l765\" (UniqueName: \"kubernetes.io/projected/a21684ab-2dff-4650-9633-4d442ce77752-kube-api-access-8l765\") pod \"a21684ab-2dff-4650-9633-4d442ce77752\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.139333 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-catalog-content\") pod \"a21684ab-2dff-4650-9633-4d442ce77752\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.139454 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-utilities\") pod \"a21684ab-2dff-4650-9633-4d442ce77752\" (UID: \"a21684ab-2dff-4650-9633-4d442ce77752\") " Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.140508 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-utilities" (OuterVolumeSpecName: "utilities") pod "a21684ab-2dff-4650-9633-4d442ce77752" (UID: "a21684ab-2dff-4650-9633-4d442ce77752"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.153400 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a21684ab-2dff-4650-9633-4d442ce77752-kube-api-access-8l765" (OuterVolumeSpecName: "kube-api-access-8l765") pod "a21684ab-2dff-4650-9633-4d442ce77752" (UID: "a21684ab-2dff-4650-9633-4d442ce77752"). InnerVolumeSpecName "kube-api-access-8l765". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.203138 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a21684ab-2dff-4650-9633-4d442ce77752" (UID: "a21684ab-2dff-4650-9633-4d442ce77752"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.241396 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.241461 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8l765\" (UniqueName: \"kubernetes.io/projected/a21684ab-2dff-4650-9633-4d442ce77752-kube-api-access-8l765\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.241483 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a21684ab-2dff-4650-9633-4d442ce77752-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.611305 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"9aeb96a7-f82c-4af8-8147-c00ffb9f9ffe","Type":"ContainerStarted","Data":"c5a5de962ea6afe31430353c00b411ecf60dc68ca65032e734d2e151545f6032"} Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.615516 4900 generic.go:334] "Generic (PLEG): container finished" podID="a21684ab-2dff-4650-9633-4d442ce77752" containerID="01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df" exitCode=0 Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.615574 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vs74w" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.615585 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs74w" event={"ID":"a21684ab-2dff-4650-9633-4d442ce77752","Type":"ContainerDied","Data":"01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df"} Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.615652 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs74w" event={"ID":"a21684ab-2dff-4650-9633-4d442ce77752","Type":"ContainerDied","Data":"08f1682942eb9e22f52ccf984961b2ffcced841f79b5d53d6ca46fd3128a3474"} Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.615685 4900 scope.go:117] "RemoveContainer" containerID="01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.637688 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=4.500029631 podStartE2EDuration="7.63765546s" podCreationTimestamp="2026-01-27 12:41:13 +0000 UTC" firstStartedPulling="2026-01-27 12:41:16.416584909 +0000 UTC m=+903.653613119" lastFinishedPulling="2026-01-27 12:41:19.554210738 +0000 UTC m=+906.791238948" observedRunningTime="2026-01-27 12:41:20.6366373 +0000 UTC m=+907.873665510" watchObservedRunningTime="2026-01-27 12:41:20.63765546 +0000 UTC m=+907.874683670" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.660738 4900 scope.go:117] "RemoveContainer" containerID="ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526" Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.666159 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vs74w"] Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.671814 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vs74w"] 
Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.741415 4900 scope.go:117] "RemoveContainer" containerID="4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0"
Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.796101 4900 scope.go:117] "RemoveContainer" containerID="01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df"
Jan 27 12:41:20 crc kubenswrapper[4900]: E0127 12:41:20.796792 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df\": container with ID starting with 01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df not found: ID does not exist" containerID="01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df"
Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.796862 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df"} err="failed to get container status \"01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df\": rpc error: code = NotFound desc = could not find container \"01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df\": container with ID starting with 01309e5f09ead23d051a9ba61275c35aa7497d1825e53d8d4799031c961156df not found: ID does not exist"
Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.796912 4900 scope.go:117] "RemoveContainer" containerID="ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526"
Jan 27 12:41:20 crc kubenswrapper[4900]: E0127 12:41:20.797776 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526\": container with ID starting with ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526 not found: ID does not exist" containerID="ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526"
Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.797834 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526"} err="failed to get container status \"ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526\": rpc error: code = NotFound desc = could not find container \"ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526\": container with ID starting with ab9a0ad7de5e8e9b566f492409b529022c184c538c8af33cd5286897162e5526 not found: ID does not exist"
Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.797886 4900 scope.go:117] "RemoveContainer" containerID="4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0"
Jan 27 12:41:20 crc kubenswrapper[4900]: E0127 12:41:20.798620 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0\": container with ID starting with 4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0 not found: ID does not exist" containerID="4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0"
Jan 27 12:41:20 crc kubenswrapper[4900]: I0127 12:41:20.798667 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0"} err="failed to get container status \"4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0\": rpc error: code = NotFound desc = could not find container \"4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0\": container with ID starting with 4e1f1df13c1af0825dbb85059db3667e339b24b3af01a6d454b3df0231e85fd0 not found: ID does not exist"
Jan 27 12:41:22 crc kubenswrapper[4900]: I0127 12:41:22.493363 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a21684ab-2dff-4650-9633-4d442ce77752" path="/var/lib/kubelet/pods/a21684ab-2dff-4650-9633-4d442ce77752/volumes"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.859250 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"]
Jan 27 12:41:26 crc kubenswrapper[4900]: E0127 12:41:26.859569 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a21684ab-2dff-4650-9633-4d442ce77752" containerName="extract-content"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.859583 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a21684ab-2dff-4650-9633-4d442ce77752" containerName="extract-content"
Jan 27 12:41:26 crc kubenswrapper[4900]: E0127 12:41:26.859591 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a21684ab-2dff-4650-9633-4d442ce77752" containerName="extract-utilities"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.859597 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a21684ab-2dff-4650-9633-4d442ce77752" containerName="extract-utilities"
Jan 27 12:41:26 crc kubenswrapper[4900]: E0127 12:41:26.859604 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a21684ab-2dff-4650-9633-4d442ce77752" containerName="registry-server"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.859611 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a21684ab-2dff-4650-9633-4d442ce77752" containerName="registry-server"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.859737 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a21684ab-2dff-4650-9633-4d442ce77752" containerName="registry-server"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.860269 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.862726 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.863247 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-948kx"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.863542 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.863969 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.864377 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.914522 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"]
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.974709 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcdq4\" (UniqueName: \"kubernetes.io/projected/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-kube-api-access-qcdq4\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.974798 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.974853 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.974921 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:26 crc kubenswrapper[4900]: I0127 12:41:26.974962 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-config\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.032048 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-rg7hv"]
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.032961 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.034964 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.036455 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.036793 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.067644 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-rg7hv"]
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.079781 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.079919 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.080084 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.080256 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-config\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.080391 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcdq4\" (UniqueName: \"kubernetes.io/projected/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-kube-api-access-qcdq4\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.081669 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.082571 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-config\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.107927 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.108001 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.130654 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcdq4\" (UniqueName: \"kubernetes.io/projected/bf3e0b5e-77aa-4f51-9cca-149e20525f8f-kube-api-access-qcdq4\") pod \"logging-loki-distributor-5f678c8dd6-k62tn\" (UID: \"bf3e0b5e-77aa-4f51-9cca-149e20525f8f\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.170577 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"]
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.172390 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.176331 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.179073 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.182469 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-config\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.182596 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.182653 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.182725 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-s3\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.182782 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tglfg\" (UniqueName: \"kubernetes.io/projected/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-kube-api-access-tglfg\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.182913 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.187172 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.207151 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"]
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284474 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbpq7\" (UniqueName: \"kubernetes.io/projected/62fdb605-a4e3-443d-9887-1ebc8218908f-kube-api-access-rbpq7\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284545 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tglfg\" (UniqueName: \"kubernetes.io/projected/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-kube-api-access-tglfg\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284581 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62fdb605-a4e3-443d-9887-1ebc8218908f-config\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284619 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284645 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284666 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-config\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284698 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284724 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284764 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284790 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.284809 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-s3\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.289691 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-config\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.292723 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.295490 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.298286 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.299292 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-logging-loki-s3\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.310805 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tglfg\" (UniqueName: \"kubernetes.io/projected/f1cfe76c-2aba-4da6-a7a7-fa01e883cb60-kube-api-access-tglfg\") pod \"logging-loki-querier-76788598db-rg7hv\" (UID: \"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60\") " pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.341933 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"]
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.343474 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.346292 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.347834 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.348165 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.349381 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.349446 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.353782 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.358658 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"]
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.366040 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.369147 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-vdzb9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.391291 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.391426 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.391520 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbpq7\" (UniqueName: \"kubernetes.io/projected/62fdb605-a4e3-443d-9887-1ebc8218908f-kube-api-access-rbpq7\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.391580 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62fdb605-a4e3-443d-9887-1ebc8218908f-config\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.391627 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.391683 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.393010 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62fdb605-a4e3-443d-9887-1ebc8218908f-config\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.398839 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.403043 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62fdb605-a4e3-443d-9887-1ebc8218908f-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.404638 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"]
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.412401 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"]
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.419653 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbpq7\" (UniqueName: \"kubernetes.io/projected/62fdb605-a4e3-443d-9887-1ebc8218908f-kube-api-access-rbpq7\") pod \"logging-loki-query-frontend-69d9546745-mlj7c\" (UID: \"62fdb605-a4e3-443d-9887-1ebc8218908f\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.487007 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.493527 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-lokistack-gateway\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.493711 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d7pp\" (UniqueName: \"kubernetes.io/projected/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-kube-api-access-8d7pp\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.493780 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tls-secret\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.493823 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdqjr\" (UniqueName: \"kubernetes.io/projected/8d06d09a-f602-4b44-a4d0-2566d02321df-kube-api-access-wdqjr\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.493904 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tenants\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.493995 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tenants\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494030 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-rbac\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494100 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494161 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494197 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494257 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494294 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494352 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494409 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-lokistack-gateway\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494443 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-rbac\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.494488 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tls-secret\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.595543 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d7pp\" (UniqueName: \"kubernetes.io/projected/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-kube-api-access-8d7pp\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.595602 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tls-secret\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.595636 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdqjr\" (UniqueName: \"kubernetes.io/projected/8d06d09a-f602-4b44-a4d0-2566d02321df-kube-api-access-wdqjr\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:27 crc kubenswrapper[4900]: E0127 12:41:27.595945 4900 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found
Jan 27 12:41:27 crc kubenswrapper[4900]: E0127 12:41:27.596315 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tls-secret podName:8d06d09a-f602-4b44-a4d0-2566d02321df nodeName:}" failed. No retries permitted until 2026-01-27 12:41:28.096170502 +0000 UTC m=+915.333198712 (durationBeforeRetry 500ms).
Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tls-secret") pod "logging-loki-gateway-7dbfd5bb68-zslxm" (UID: "8d06d09a-f602-4b44-a4d0-2566d02321df") : secret "logging-loki-gateway-http" not found Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.596052 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tenants\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.597612 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tenants\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.597687 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-rbac\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.597841 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.598751 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-rbac\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.599755 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.600535 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.603293 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: 
\"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.603409 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.603494 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.603540 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.603605 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-lokistack-gateway\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.603643 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-rbac\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.603669 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tls-secret\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.603711 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-lokistack-gateway\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.605506 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tenants\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: E0127 12:41:27.605588 4900 secret.go:188] Couldn't 
get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Jan 27 12:41:27 crc kubenswrapper[4900]: E0127 12:41:27.605634 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tls-secret podName:4de6c1e3-c4c6-47f9-951f-b07adc7744cf nodeName:}" failed. No retries permitted until 2026-01-27 12:41:28.105615936 +0000 UTC m=+915.342644146 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tls-secret") pod "logging-loki-gateway-7dbfd5bb68-zmqn9" (UID: "4de6c1e3-c4c6-47f9-951f-b07adc7744cf") : secret "logging-loki-gateway-http" not found Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.606249 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.606760 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.606936 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.607547 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-lokistack-gateway\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.607765 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/8d06d09a-f602-4b44-a4d0-2566d02321df-lokistack-gateway\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.609677 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-rbac\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.610225 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-ca-bundle\") pod 
\"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.611108 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tenants\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.612482 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.614494 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d7pp\" (UniqueName: \"kubernetes.io/projected/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-kube-api-access-8d7pp\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.627733 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdqjr\" (UniqueName: \"kubernetes.io/projected/8d06d09a-f602-4b44-a4d0-2566d02321df-kube-api-access-wdqjr\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:27 crc kubenswrapper[4900]: I0127 12:41:27.918788 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"] Jan 27 12:41:27 crc kubenswrapper[4900]: W0127 12:41:27.925932 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf3e0b5e_77aa_4f51_9cca_149e20525f8f.slice/crio-2922dcb279f385a88aabfd74a8e1974cfda14598e713b60da463ec50f86506c5 WatchSource:0}: Error finding container 2922dcb279f385a88aabfd74a8e1974cfda14598e713b60da463ec50f86506c5: Status 404 returned error can't find the container with id 2922dcb279f385a88aabfd74a8e1974cfda14598e713b60da463ec50f86506c5 Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.022198 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.023371 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.027678 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.028670 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.070919 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117635 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-138ca281-100b-41f5-85ec-60bf09970c08\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-138ca281-100b-41f5-85ec-60bf09970c08\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117710 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tls-secret\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117738 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1794901a-7086-4d2e-bae7-3a0de432c19c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1794901a-7086-4d2e-bae7-3a0de432c19c\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117757 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e780efe8-7578-4940-b01c-c199f36d6554-config\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117775 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vblr6\" (UniqueName: \"kubernetes.io/projected/e780efe8-7578-4940-b01c-c199f36d6554-kube-api-access-vblr6\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117816 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117865 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " 
pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117906 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117923 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.117946 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tls-secret\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.131049 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/8d06d09a-f602-4b44-a4d0-2566d02321df-tls-secret\") pod \"logging-loki-gateway-7dbfd5bb68-zslxm\" (UID: \"8d06d09a-f602-4b44-a4d0-2566d02321df\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.132862 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/4de6c1e3-c4c6-47f9-951f-b07adc7744cf-tls-secret\") pod \"logging-loki-gateway-7dbfd5bb68-zmqn9\" (UID: \"4de6c1e3-c4c6-47f9-951f-b07adc7744cf\") " pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.155696 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.156850 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.168585 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.168592 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.184976 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.213021 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-rg7hv"] Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219207 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219263 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219285 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219318 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219356 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219377 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-138ca281-100b-41f5-85ec-60bf09970c08\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-138ca281-100b-41f5-85ec-60bf09970c08\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219397 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e780efe8-7578-4940-b01c-c199f36d6554-config\") pod \"logging-loki-ingester-0\" (UID: 
\"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219417 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1794901a-7086-4d2e-bae7-3a0de432c19c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1794901a-7086-4d2e-bae7-3a0de432c19c\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219436 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vblr6\" (UniqueName: \"kubernetes.io/projected/e780efe8-7578-4940-b01c-c199f36d6554-kube-api-access-vblr6\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219457 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-config\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219475 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219502 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ae383121-5983-49ec-84ec-77f80067e382\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ae383121-5983-49ec-84ec-77f80067e382\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219526 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219543 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.219560 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59h6z\" (UniqueName: \"kubernetes.io/projected/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-kube-api-access-59h6z\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.220940 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.221531 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"] Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.221695 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e780efe8-7578-4940-b01c-c199f36d6554-config\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.225375 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.225882 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.233480 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/e780efe8-7578-4940-b01c-c199f36d6554-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.239016 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.239099 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1794901a-7086-4d2e-bae7-3a0de432c19c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1794901a-7086-4d2e-bae7-3a0de432c19c\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/45d6b3b4d47c85331a85d143cf1fefce0ae63f843c740228d8b98c74377c19da/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.239893 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.239929 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-138ca281-100b-41f5-85ec-60bf09970c08\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-138ca281-100b-41f5-85ec-60bf09970c08\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/490e32a4c565bdde6d6710a8b0a2312300c1ad20741683604b8efe2d735b338d/globalmount\"" pod="openshift-logging/logging-loki-ingester-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.240732 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vblr6\" (UniqueName: \"kubernetes.io/projected/e780efe8-7578-4940-b01c-c199f36d6554-kube-api-access-vblr6\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.271932 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.286194 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.309970 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.311099 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.314374 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.314648 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.320642 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ae383121-5983-49ec-84ec-77f80067e382\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ae383121-5983-49ec-84ec-77f80067e382\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.320793 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.320872 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59h6z\" (UniqueName: \"kubernetes.io/projected/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-kube-api-access-59h6z\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.321099 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.321342 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.321493 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-config\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.321586 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.321848 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.321950 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.324149 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-config\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.324427 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1794901a-7086-4d2e-bae7-3a0de432c19c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1794901a-7086-4d2e-bae7-3a0de432c19c\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0"
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.327797 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.327859 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ae383121-5983-49ec-84ec-77f80067e382\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ae383121-5983-49ec-84ec-77f80067e382\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bbbc13907f29d8517004460d160ccb34c6cb362d90700dd28148b1aa4c309e25/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.328536 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.328862 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.338395 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.344974 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59h6z\" (UniqueName: \"kubernetes.io/projected/904dcfed-5ddb-4cb9-bac8-8feb64b3bab4-kube-api-access-59h6z\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.359892 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-138ca281-100b-41f5-85ec-60bf09970c08\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-138ca281-100b-41f5-85ec-60bf09970c08\") pod \"logging-loki-ingester-0\" (UID: \"e780efe8-7578-4940-b01c-c199f36d6554\") " pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.369634 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ae383121-5983-49ec-84ec-77f80067e382\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ae383121-5983-49ec-84ec-77f80067e382\") pod \"logging-loki-compactor-0\" (UID: \"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4\") " pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.424004 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.424143 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7579da74-f173-4a9a-a494-8682cc2887e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7579da74-f173-4a9a-a494-8682cc2887e1\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.424226 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.424256 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e0b7978-27f2-42e9-8116-59384da3719b-config\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.424301 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cs68\" (UniqueName: \"kubernetes.io/projected/7e0b7978-27f2-42e9-8116-59384da3719b-kube-api-access-2cs68\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.424324 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.424390 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.483299 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.525889 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.525997 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.526035 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7579da74-f173-4a9a-a494-8682cc2887e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7579da74-f173-4a9a-a494-8682cc2887e1\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.526203 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.526228 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e0b7978-27f2-42e9-8116-59384da3719b-config\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.526274 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cs68\" (UniqueName: \"kubernetes.io/projected/7e0b7978-27f2-42e9-8116-59384da3719b-kube-api-access-2cs68\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.526310 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.527616 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.529097 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7e0b7978-27f2-42e9-8116-59384da3719b-config\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.533139 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.543258 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.543334 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7579da74-f173-4a9a-a494-8682cc2887e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7579da74-f173-4a9a-a494-8682cc2887e1\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7300459abfe7ae90d29c89af58e68334c5053568363c698f4f854d8038e386d2/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.547304 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.547325 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/7e0b7978-27f2-42e9-8116-59384da3719b-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.549723 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cs68\" (UniqueName: \"kubernetes.io/projected/7e0b7978-27f2-42e9-8116-59384da3719b-kube-api-access-2cs68\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.571744 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9"] Jan 27 12:41:28 crc kubenswrapper[4900]: W0127 12:41:28.574633 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4de6c1e3_c4c6_47f9_951f_b07adc7744cf.slice/crio-e0c153f412a0f7a6451971d5c4e5b82a5b03d9bf2275390f21ea4944f8ebc6e8 WatchSource:0}: Error finding container e0c153f412a0f7a6451971d5c4e5b82a5b03d9bf2275390f21ea4944f8ebc6e8: Status 404 returned error can't find the container with id e0c153f412a0f7a6451971d5c4e5b82a5b03d9bf2275390f21ea4944f8ebc6e8 Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.583168 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7579da74-f173-4a9a-a494-8682cc2887e1\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7579da74-f173-4a9a-a494-8682cc2887e1\") pod \"logging-loki-index-gateway-0\" (UID: \"7e0b7978-27f2-42e9-8116-59384da3719b\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.642259 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.660423 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.706893 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" event={"ID":"4de6c1e3-c4c6-47f9-951f-b07adc7744cf","Type":"ContainerStarted","Data":"e0c153f412a0f7a6451971d5c4e5b82a5b03d9bf2275390f21ea4944f8ebc6e8"} Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.736181 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" event={"ID":"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60","Type":"ContainerStarted","Data":"9467e6d18ed109748853e324c2cc01e585ebacc9ac25204864c93989bae08258"} Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.739109 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" event={"ID":"bf3e0b5e-77aa-4f51-9cca-149e20525f8f","Type":"ContainerStarted","Data":"2922dcb279f385a88aabfd74a8e1974cfda14598e713b60da463ec50f86506c5"} Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.740744 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" event={"ID":"62fdb605-a4e3-443d-9887-1ebc8218908f","Type":"ContainerStarted","Data":"3a57abd69141755d4e5e1d7c156a055700187dae8260b5ef91243c003e355597"} Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.893268 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"] Jan 27 12:41:28 crc kubenswrapper[4900]: W0127 12:41:28.897634 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d06d09a_f602_4b44_a4d0_2566d02321df.slice/crio-a784dfddc1658511f1ecddc6987c1f25da5a84d241cc67a61af542710eed3631 WatchSource:0}: Error finding container a784dfddc1658511f1ecddc6987c1f25da5a84d241cc67a61af542710eed3631: Status 404 returned error can't find the container with id a784dfddc1658511f1ecddc6987c1f25da5a84d241cc67a61af542710eed3631 Jan 27 12:41:28 crc kubenswrapper[4900]: I0127 12:41:28.982019 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Jan 27 12:41:29 crc kubenswrapper[4900]: I0127 12:41:29.155598 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Jan 27 12:41:29 crc kubenswrapper[4900]: I0127 12:41:29.217104 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Jan 27 12:41:29 crc kubenswrapper[4900]: W0127 12:41:29.217993 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e0b7978_27f2_42e9_8116_59384da3719b.slice/crio-c0d0b90ea2c14aa20a13252d6991a46736eaee566a587bc01a36f9761feb6ff3 WatchSource:0}: Error finding container 
Jan 27 12:41:29 crc kubenswrapper[4900]: W0127 12:41:29.217993 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e0b7978_27f2_42e9_8116_59384da3719b.slice/crio-c0d0b90ea2c14aa20a13252d6991a46736eaee566a587bc01a36f9761feb6ff3 WatchSource:0}: Error finding container c0d0b90ea2c14aa20a13252d6991a46736eaee566a587bc01a36f9761feb6ff3: Status 404 returned error can't find the container with id c0d0b90ea2c14aa20a13252d6991a46736eaee566a587bc01a36f9761feb6ff3
Jan 27 12:41:29 crc kubenswrapper[4900]: I0127 12:41:29.749964 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"e780efe8-7578-4940-b01c-c199f36d6554","Type":"ContainerStarted","Data":"450a87a9d57f4aef9a050f54172a8e678f47dfb5fd223c0e86b88d1ee147871c"}
Jan 27 12:41:29 crc kubenswrapper[4900]: I0127 12:41:29.751445 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" event={"ID":"8d06d09a-f602-4b44-a4d0-2566d02321df","Type":"ContainerStarted","Data":"a784dfddc1658511f1ecddc6987c1f25da5a84d241cc67a61af542710eed3631"}
Jan 27 12:41:29 crc kubenswrapper[4900]: I0127 12:41:29.754340 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4","Type":"ContainerStarted","Data":"e1a2ffb1077e32d93e3303ef4f791c90ee4383dd54182d14d00daec432f788df"}
Jan 27 12:41:29 crc kubenswrapper[4900]: I0127 12:41:29.755435 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"7e0b7978-27f2-42e9-8116-59384da3719b","Type":"ContainerStarted","Data":"c0d0b90ea2c14aa20a13252d6991a46736eaee566a587bc01a36f9761feb6ff3"}
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.832730 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" event={"ID":"8d06d09a-f602-4b44-a4d0-2566d02321df","Type":"ContainerStarted","Data":"6491974a874cf8ed90ed51fe8e3721ee3f116fcd0e23821f46eba1f725e35eef"}
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.835678 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" event={"ID":"bf3e0b5e-77aa-4f51-9cca-149e20525f8f","Type":"ContainerStarted","Data":"36b11014885af6021b0e237ec8e3fac3f845e8ce5512e4c4fcd28d0a5f9e84fc"}
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.836005 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn"
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.838583 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"904dcfed-5ddb-4cb9-bac8-8feb64b3bab4","Type":"ContainerStarted","Data":"1c945c177dd315ed59f0d718364b21e0580b05999d6931470cd9c3fdd622a6f4"}
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.838805 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0"
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.841144 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" event={"ID":"62fdb605-a4e3-443d-9887-1ebc8218908f","Type":"ContainerStarted","Data":"882d45d80f60acae74028ec1d92931cfa1c2f3abbbf78e233b4c5e333cfce6c5"}
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.841297 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c"
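Each "SyncLoop (PLEG): event for pod" record above is emitted by kubelet's pod lifecycle event generator when a container or sandbox changes state, and the pod name, pod UID, event type, and container ID can all be recovered mechanically from the klog key=value layout. A small stand-alone sketch (the regexp is a best-effort assumption about that layout, not an official parser):

    // plegscan.go - pull pod name, UID, event type, and container ID out of
    // kubelet PLEG event lines like the ones above.
    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"regexp"
    )

    // Matches: "SyncLoop (PLEG): event for pod" pod="ns/name"
    //          event={"ID":"uid","Type":"ContainerStarted","Data":"cid"}
    var plegRe = regexp.MustCompile(
    	`"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"([^"]+)","Data":"([^"]+)"\}`)

    func main() {
    	sc := bufio.NewScanner(os.Stdin)
    	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // kubelet lines can be long
    	for sc.Scan() {
    		if m := plegRe.FindStringSubmatch(sc.Text()); m != nil {
    			// m[1]=pod, m[2]=pod UID, m[3]=event type, m[4]=container/sandbox ID
    			fmt.Printf("%-18s %-60s %s\n", m[3], m[1], m[4])
    		}
    	}
    }

Fed the decompressed log on stdin (e.g. zcat kubelet.log.gz | go run plegscan.go), it prints one line per container transition, which makes sequences like the ContainerStarted bursts above much easier to follow.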
pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"7e0b7978-27f2-42e9-8116-59384da3719b","Type":"ContainerStarted","Data":"5b9276948e607c19b4f52624ba6d879f4112a0af116189a60293d443977d84ef"} Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.845039 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.847129 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" event={"ID":"4de6c1e3-c4c6-47f9-951f-b07adc7744cf","Type":"ContainerStarted","Data":"ce69383b4f739badbaf5fe4a0abe18f443f287bec6004ca2ec6ed0a506df24dc"} Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.849402 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"e780efe8-7578-4940-b01c-c199f36d6554","Type":"ContainerStarted","Data":"da6627a7d8e23ff90e8bf205f193071a9c389f7485575df5b8c01e3a755141c6"} Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.849655 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.850840 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" event={"ID":"f1cfe76c-2aba-4da6-a7a7-fa01e883cb60","Type":"ContainerStarted","Data":"1475a99295920bd03855e3bd9c709b61fc1ff5f5eaa1c01fde23bd6761249772"} Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.851375 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.863801 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" podStartSLOduration=2.792489908 podStartE2EDuration="7.863775449s" podCreationTimestamp="2026-01-27 12:41:26 +0000 UTC" firstStartedPulling="2026-01-27 12:41:27.928082666 +0000 UTC m=+915.165110876" lastFinishedPulling="2026-01-27 12:41:32.999368207 +0000 UTC m=+920.236396417" observedRunningTime="2026-01-27 12:41:33.858802894 +0000 UTC m=+921.095831104" watchObservedRunningTime="2026-01-27 12:41:33.863775449 +0000 UTC m=+921.100803649" Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.887491 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" podStartSLOduration=2.143140106 podStartE2EDuration="6.887464028s" podCreationTimestamp="2026-01-27 12:41:27 +0000 UTC" firstStartedPulling="2026-01-27 12:41:28.256613831 +0000 UTC m=+915.493642041" lastFinishedPulling="2026-01-27 12:41:33.000937743 +0000 UTC m=+920.237965963" observedRunningTime="2026-01-27 12:41:33.881242497 +0000 UTC m=+921.118270717" watchObservedRunningTime="2026-01-27 12:41:33.887464028 +0000 UTC m=+921.124492238" Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.910200 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" podStartSLOduration=2.071057459 podStartE2EDuration="6.910179378s" podCreationTimestamp="2026-01-27 12:41:27 +0000 UTC" firstStartedPulling="2026-01-27 12:41:28.221723846 +0000 UTC m=+915.458752056" lastFinishedPulling="2026-01-27 12:41:33.060845765 +0000 UTC m=+920.297873975" observedRunningTime="2026-01-27 
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.910200 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" podStartSLOduration=2.071057459 podStartE2EDuration="6.910179378s" podCreationTimestamp="2026-01-27 12:41:27 +0000 UTC" firstStartedPulling="2026-01-27 12:41:28.221723846 +0000 UTC m=+915.458752056" lastFinishedPulling="2026-01-27 12:41:33.060845765 +0000 UTC m=+920.297873975" observedRunningTime="2026-01-27 12:41:33.906304616 +0000 UTC m=+921.143332826" watchObservedRunningTime="2026-01-27 12:41:33.910179378 +0000 UTC m=+921.147207588"
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.943504 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=4.109797993 podStartE2EDuration="7.943475997s" podCreationTimestamp="2026-01-27 12:41:26 +0000 UTC" firstStartedPulling="2026-01-27 12:41:29.167596878 +0000 UTC m=+916.404625088" lastFinishedPulling="2026-01-27 12:41:33.001274882 +0000 UTC m=+920.238303092" observedRunningTime="2026-01-27 12:41:33.939545613 +0000 UTC m=+921.176573823" watchObservedRunningTime="2026-01-27 12:41:33.943475997 +0000 UTC m=+921.180504207"
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.972306 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.053538506 podStartE2EDuration="6.972271174s" podCreationTimestamp="2026-01-27 12:41:27 +0000 UTC" firstStartedPulling="2026-01-27 12:41:29.004136824 +0000 UTC m=+916.241165034" lastFinishedPulling="2026-01-27 12:41:32.922869492 +0000 UTC m=+920.159897702" observedRunningTime="2026-01-27 12:41:33.966902218 +0000 UTC m=+921.203930448" watchObservedRunningTime="2026-01-27 12:41:33.972271174 +0000 UTC m=+921.209299394"
Jan 27 12:41:33 crc kubenswrapper[4900]: I0127 12:41:33.996413 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=3.218288448 podStartE2EDuration="6.996369025s" podCreationTimestamp="2026-01-27 12:41:27 +0000 UTC" firstStartedPulling="2026-01-27 12:41:29.221674201 +0000 UTC m=+916.458702411" lastFinishedPulling="2026-01-27 12:41:32.999754778 +0000 UTC m=+920.236782988" observedRunningTime="2026-01-27 12:41:33.987354903 +0000 UTC m=+921.224383123" watchObservedRunningTime="2026-01-27 12:41:33.996369025 +0000 UTC m=+921.233397235"
Jan 27 12:41:35 crc kubenswrapper[4900]: I0127 12:41:35.871193 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" event={"ID":"8d06d09a-f602-4b44-a4d0-2566d02321df","Type":"ContainerStarted","Data":"bcd6c9ae7fad6628e3ca996439485380ce56b7045ab75785800cf62ed860519b"}
Jan 27 12:41:35 crc kubenswrapper[4900]: I0127 12:41:35.872153 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm"
Jan 27 12:41:35 crc kubenswrapper[4900]: I0127 12:41:35.873038 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8083/ready\": dial tcp 10.217.0.56:8083: connect: connection refused" start-of-body=
Jan 27 12:41:35 crc kubenswrapper[4900]: I0127 12:41:35.873110 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.56:8083/ready\": dial tcp 10.217.0.56:8083: connect: connection refused"
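The opa readiness failure above is a startup race rather than a real fault: the probe fires before the container binds 10.217.0.56:8083, the TCP connect is refused, and the same pod reports ready a second later. Kubelet's HTTP prober treats any transport error, or a status code outside 200-399, as a probe failure. A rough stand-alone equivalent of the check, using the URL from the failure output (a sketch only; kubelet's real prober also handles redirects, custom headers, and failure thresholds):

    // readyprobe.go - approximate the HTTP readiness check kubelet ran against
    // the opa sidecar above. Standalone sketch, not kubelet's prober.go.
    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    	"time"
    )

    func probe(url string) error {
    	client := &http.Client{
    		Timeout: 1 * time.Second,
    		// For HTTPS probes kubelet does not verify the serving certificate.
    		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
    	}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err // e.g. "connect: connection refused", as logged above
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
    	}
    	return nil
    }

    func main() {
    	if err := probe("https://10.217.0.56:8083/ready"); err != nil {
    		fmt.Println("Probe failed:", err)
    	} else {
    		fmt.Println("ready")
    	}
    }

The status-code branch of the same check is what produces the later "HTTP probe failed with statuscode: 503" records from logging-loki-ingester-0, whose /ready handler returns 503 until the ingester owns ring tokens.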
event={"ID":"4de6c1e3-c4c6-47f9-951f-b07adc7744cf","Type":"ContainerStarted","Data":"bad6e779208f5384a18ca69fb7acb92d365fdce69771e90479b76949ed3e12e6"} Jan 27 12:41:35 crc kubenswrapper[4900]: I0127 12:41:35.874975 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:35 crc kubenswrapper[4900]: I0127 12:41:35.883272 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:35 crc kubenswrapper[4900]: I0127 12:41:35.901630 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podStartSLOduration=2.21786567 podStartE2EDuration="8.90160803s" podCreationTimestamp="2026-01-27 12:41:27 +0000 UTC" firstStartedPulling="2026-01-27 12:41:28.90293136 +0000 UTC m=+916.139959570" lastFinishedPulling="2026-01-27 12:41:35.58667373 +0000 UTC m=+922.823701930" observedRunningTime="2026-01-27 12:41:35.896944524 +0000 UTC m=+923.133972734" watchObservedRunningTime="2026-01-27 12:41:35.90160803 +0000 UTC m=+923.138636240" Jan 27 12:41:35 crc kubenswrapper[4900]: I0127 12:41:35.928257 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podStartSLOduration=1.921463949 podStartE2EDuration="8.928232334s" podCreationTimestamp="2026-01-27 12:41:27 +0000 UTC" firstStartedPulling="2026-01-27 12:41:28.577663559 +0000 UTC m=+915.814691759" lastFinishedPulling="2026-01-27 12:41:35.584431934 +0000 UTC m=+922.821460144" observedRunningTime="2026-01-27 12:41:35.92154729 +0000 UTC m=+923.158575500" watchObservedRunningTime="2026-01-27 12:41:35.928232334 +0000 UTC m=+923.165260534" Jan 27 12:41:36 crc kubenswrapper[4900]: I0127 12:41:36.883583 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:36 crc kubenswrapper[4900]: I0127 12:41:36.883668 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:36 crc kubenswrapper[4900]: I0127 12:41:36.894485 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:36 crc kubenswrapper[4900]: I0127 12:41:36.895277 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" Jan 27 12:41:36 crc kubenswrapper[4900]: I0127 12:41:36.897034 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.159484 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6qpqw"] Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.163468 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.185501 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6qpqw"] Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.241611 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-utilities\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.241755 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ltrb\" (UniqueName: \"kubernetes.io/projected/227dbf3e-4b03-4533-abc3-0e88e0970086-kube-api-access-9ltrb\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.241833 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-catalog-content\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.343746 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ltrb\" (UniqueName: \"kubernetes.io/projected/227dbf3e-4b03-4533-abc3-0e88e0970086-kube-api-access-9ltrb\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.343824 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-catalog-content\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.343914 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-utilities\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.344620 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-utilities\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.344659 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-catalog-content\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.366052 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9ltrb\" (UniqueName: \"kubernetes.io/projected/227dbf3e-4b03-4533-abc3-0e88e0970086-kube-api-access-9ltrb\") pod \"community-operators-6qpqw\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:44 crc kubenswrapper[4900]: I0127 12:41:44.525599 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:45 crc kubenswrapper[4900]: I0127 12:41:45.904755 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6qpqw"] Jan 27 12:41:45 crc kubenswrapper[4900]: W0127 12:41:45.944878 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod227dbf3e_4b03_4533_abc3_0e88e0970086.slice/crio-f0b1a396b3a1d916364c7762f7833796012b22c06264cf20327bc5deb6a2ac17 WatchSource:0}: Error finding container f0b1a396b3a1d916364c7762f7833796012b22c06264cf20327bc5deb6a2ac17: Status 404 returned error can't find the container with id f0b1a396b3a1d916364c7762f7833796012b22c06264cf20327bc5deb6a2ac17 Jan 27 12:41:45 crc kubenswrapper[4900]: I0127 12:41:45.972967 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6qpqw" event={"ID":"227dbf3e-4b03-4533-abc3-0e88e0970086","Type":"ContainerStarted","Data":"f0b1a396b3a1d916364c7762f7833796012b22c06264cf20327bc5deb6a2ac17"} Jan 27 12:41:47 crc kubenswrapper[4900]: I0127 12:41:47.991110 4900 generic.go:334] "Generic (PLEG): container finished" podID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerID="2e2869e7cfefe19bbb647713bbe59b3d0d427e916900d120687317010730623e" exitCode=0 Jan 27 12:41:47 crc kubenswrapper[4900]: I0127 12:41:47.991513 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6qpqw" event={"ID":"227dbf3e-4b03-4533-abc3-0e88e0970086","Type":"ContainerDied","Data":"2e2869e7cfefe19bbb647713bbe59b3d0d427e916900d120687317010730623e"} Jan 27 12:41:48 crc kubenswrapper[4900]: I0127 12:41:48.494491 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Jan 27 12:41:48 crc kubenswrapper[4900]: I0127 12:41:48.653598 4900 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Jan 27 12:41:48 crc kubenswrapper[4900]: I0127 12:41:48.653686 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="e780efe8-7578-4940-b01c-c199f36d6554" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 27 12:41:48 crc kubenswrapper[4900]: I0127 12:41:48.668305 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Jan 27 12:41:50 crc kubenswrapper[4900]: I0127 12:41:50.007548 4900 generic.go:334] "Generic (PLEG): container finished" podID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerID="c3de6230a7e93cb0d0fcb78754a63f56eed322808ae959a97718a541647f9e5c" exitCode=0 Jan 27 12:41:50 crc kubenswrapper[4900]: I0127 12:41:50.007633 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6qpqw" 
event={"ID":"227dbf3e-4b03-4533-abc3-0e88e0970086","Type":"ContainerDied","Data":"c3de6230a7e93cb0d0fcb78754a63f56eed322808ae959a97718a541647f9e5c"} Jan 27 12:41:51 crc kubenswrapper[4900]: I0127 12:41:51.018701 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6qpqw" event={"ID":"227dbf3e-4b03-4533-abc3-0e88e0970086","Type":"ContainerStarted","Data":"887bdd5114267c57c96ad97cd0f3c0554abeeaadec1c6ec03395bb71ada4e4be"} Jan 27 12:41:51 crc kubenswrapper[4900]: I0127 12:41:51.047935 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6qpqw" podStartSLOduration=4.51508486 podStartE2EDuration="7.047911638s" podCreationTimestamp="2026-01-27 12:41:44 +0000 UTC" firstStartedPulling="2026-01-27 12:41:47.994162068 +0000 UTC m=+935.231190278" lastFinishedPulling="2026-01-27 12:41:50.526988846 +0000 UTC m=+937.764017056" observedRunningTime="2026-01-27 12:41:51.044509969 +0000 UTC m=+938.281538179" watchObservedRunningTime="2026-01-27 12:41:51.047911638 +0000 UTC m=+938.284939848" Jan 27 12:41:52 crc kubenswrapper[4900]: I0127 12:41:52.373133 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:41:52 crc kubenswrapper[4900]: I0127 12:41:52.373224 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:41:54 crc kubenswrapper[4900]: I0127 12:41:54.525803 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:54 crc kubenswrapper[4900]: I0127 12:41:54.525882 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:54 crc kubenswrapper[4900]: I0127 12:41:54.571841 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:55 crc kubenswrapper[4900]: I0127 12:41:55.096907 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:55 crc kubenswrapper[4900]: I0127 12:41:55.147618 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6qpqw"] Jan 27 12:41:57 crc kubenswrapper[4900]: I0127 12:41:57.065945 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6qpqw" podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerName="registry-server" containerID="cri-o://887bdd5114267c57c96ad97cd0f3c0554abeeaadec1c6ec03395bb71ada4e4be" gracePeriod=2 Jan 27 12:41:57 crc kubenswrapper[4900]: I0127 12:41:57.194602 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" Jan 27 12:41:57 crc kubenswrapper[4900]: I0127 12:41:57.365629 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" Jan 27 12:41:57 crc kubenswrapper[4900]: I0127 12:41:57.512890 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.077597 4900 generic.go:334] "Generic (PLEG): container finished" podID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerID="887bdd5114267c57c96ad97cd0f3c0554abeeaadec1c6ec03395bb71ada4e4be" exitCode=0 Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.077661 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6qpqw" event={"ID":"227dbf3e-4b03-4533-abc3-0e88e0970086","Type":"ContainerDied","Data":"887bdd5114267c57c96ad97cd0f3c0554abeeaadec1c6ec03395bb71ada4e4be"} Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.148337 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.232364 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ltrb\" (UniqueName: \"kubernetes.io/projected/227dbf3e-4b03-4533-abc3-0e88e0970086-kube-api-access-9ltrb\") pod \"227dbf3e-4b03-4533-abc3-0e88e0970086\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.232511 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-utilities\") pod \"227dbf3e-4b03-4533-abc3-0e88e0970086\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.232718 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-catalog-content\") pod \"227dbf3e-4b03-4533-abc3-0e88e0970086\" (UID: \"227dbf3e-4b03-4533-abc3-0e88e0970086\") " Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.233970 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-utilities" (OuterVolumeSpecName: "utilities") pod "227dbf3e-4b03-4533-abc3-0e88e0970086" (UID: "227dbf3e-4b03-4533-abc3-0e88e0970086"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.234542 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.240212 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/227dbf3e-4b03-4533-abc3-0e88e0970086-kube-api-access-9ltrb" (OuterVolumeSpecName: "kube-api-access-9ltrb") pod "227dbf3e-4b03-4533-abc3-0e88e0970086" (UID: "227dbf3e-4b03-4533-abc3-0e88e0970086"). InnerVolumeSpecName "kube-api-access-9ltrb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.298643 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "227dbf3e-4b03-4533-abc3-0e88e0970086" (UID: "227dbf3e-4b03-4533-abc3-0e88e0970086"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.336612 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ltrb\" (UniqueName: \"kubernetes.io/projected/227dbf3e-4b03-4533-abc3-0e88e0970086-kube-api-access-9ltrb\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.336667 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/227dbf3e-4b03-4533-abc3-0e88e0970086-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.648089 4900 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Jan 27 12:41:58 crc kubenswrapper[4900]: I0127 12:41:58.648169 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="e780efe8-7578-4940-b01c-c199f36d6554" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 27 12:41:59 crc kubenswrapper[4900]: I0127 12:41:59.094539 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6qpqw" event={"ID":"227dbf3e-4b03-4533-abc3-0e88e0970086","Type":"ContainerDied","Data":"f0b1a396b3a1d916364c7762f7833796012b22c06264cf20327bc5deb6a2ac17"} Jan 27 12:41:59 crc kubenswrapper[4900]: I0127 12:41:59.094647 4900 scope.go:117] "RemoveContainer" containerID="887bdd5114267c57c96ad97cd0f3c0554abeeaadec1c6ec03395bb71ada4e4be" Jan 27 12:41:59 crc kubenswrapper[4900]: I0127 12:41:59.095521 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6qpqw" Jan 27 12:41:59 crc kubenswrapper[4900]: I0127 12:41:59.119335 4900 scope.go:117] "RemoveContainer" containerID="c3de6230a7e93cb0d0fcb78754a63f56eed322808ae959a97718a541647f9e5c" Jan 27 12:41:59 crc kubenswrapper[4900]: I0127 12:41:59.122483 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6qpqw"] Jan 27 12:41:59 crc kubenswrapper[4900]: I0127 12:41:59.135507 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6qpqw"] Jan 27 12:41:59 crc kubenswrapper[4900]: I0127 12:41:59.144701 4900 scope.go:117] "RemoveContainer" containerID="2e2869e7cfefe19bbb647713bbe59b3d0d427e916900d120687317010730623e" Jan 27 12:42:00 crc kubenswrapper[4900]: I0127 12:42:00.494214 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" path="/var/lib/kubelet/pods/227dbf3e-4b03-4533-abc3-0e88e0970086/volumes" Jan 27 12:42:08 crc kubenswrapper[4900]: I0127 12:42:08.646697 4900 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Jan 27 12:42:08 crc kubenswrapper[4900]: I0127 12:42:08.647652 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="e780efe8-7578-4940-b01c-c199f36d6554" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 27 12:42:18 crc kubenswrapper[4900]: I0127 12:42:18.649086 4900 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Jan 27 12:42:18 crc kubenswrapper[4900]: I0127 12:42:18.649617 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="e780efe8-7578-4940-b01c-c199f36d6554" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 27 12:42:22 crc kubenswrapper[4900]: I0127 12:42:22.372786 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:42:22 crc kubenswrapper[4900]: I0127 12:42:22.373167 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:42:28 crc kubenswrapper[4900]: I0127 12:42:28.648182 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.115464 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-wc92p"] Jan 27 12:42:46 crc kubenswrapper[4900]: E0127 12:42:46.116783 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerName="registry-server" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.116806 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerName="registry-server" Jan 27 12:42:46 crc kubenswrapper[4900]: E0127 12:42:46.116828 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerName="extract-content" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.116834 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerName="extract-content" Jan 27 12:42:46 crc kubenswrapper[4900]: E0127 12:42:46.116849 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerName="extract-utilities" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.116857 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerName="extract-utilities" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.117022 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="227dbf3e-4b03-4533-abc3-0e88e0970086" containerName="registry-server" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.117921 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.125850 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.126041 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.126279 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.126509 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.126862 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-69qvr" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.134747 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.183458 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-wc92p"] Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.201093 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-wc92p"] Jan 27 12:42:46 crc kubenswrapper[4900]: E0127 12:42:46.201483 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-4lvm2 metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-4lvm2 metrics sa-token tmp trusted-ca]: context canceled" pod="openshift-logging/collector-wc92p" podUID="39953208-1f17-4b4b-acb4-023c977192fa" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.302581 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config-openshift-service-cacrt\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.302662 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/39953208-1f17-4b4b-acb4-023c977192fa-datadir\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.302739 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-entrypoint\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.302889 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-token\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.302981 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/39953208-1f17-4b4b-acb4-023c977192fa-tmp\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.303043 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-metrics\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.303207 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.303283 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lvm2\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-kube-api-access-4lvm2\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.303338 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-trusted-ca\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.303426 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-syslog-receiver\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.303646 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-sa-token\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406409 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-sa-token\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406550 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config-openshift-service-cacrt\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406589 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/39953208-1f17-4b4b-acb4-023c977192fa-datadir\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406638 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-entrypoint\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406671 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-token\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406713 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/39953208-1f17-4b4b-acb4-023c977192fa-tmp\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406745 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-metrics\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406823 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc 
kubenswrapper[4900]: I0127 12:42:46.406867 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lvm2\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-kube-api-access-4lvm2\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406902 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-trusted-ca\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.406950 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-syslog-receiver\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.407491 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config-openshift-service-cacrt\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.408142 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.408778 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-trusted-ca\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.409312 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-entrypoint\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.409364 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/39953208-1f17-4b4b-acb4-023c977192fa-datadir\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.413471 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-token\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.413861 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/39953208-1f17-4b4b-acb4-023c977192fa-tmp\") pod \"collector-wc92p\" (UID: 
\"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.414373 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-syslog-receiver\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.430386 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-metrics\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.435554 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lvm2\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-kube-api-access-4lvm2\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.435902 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-sa-token\") pod \"collector-wc92p\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.495849 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.508155 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-wc92p" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610177 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-sa-token\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610277 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config-openshift-service-cacrt\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610370 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-metrics\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610418 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-trusted-ca\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610465 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/39953208-1f17-4b4b-acb4-023c977192fa-tmp\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610540 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lvm2\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-kube-api-access-4lvm2\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610589 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-entrypoint\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610661 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-syslog-receiver\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610742 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610816 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-token\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: 
\"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.610845 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/39953208-1f17-4b4b-acb4-023c977192fa-datadir\") pod \"39953208-1f17-4b4b-acb4-023c977192fa\" (UID: \"39953208-1f17-4b4b-acb4-023c977192fa\") " Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.611713 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/39953208-1f17-4b4b-acb4-023c977192fa-datadir" (OuterVolumeSpecName: "datadir") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.612596 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.613026 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.613204 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.614835 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config" (OuterVolumeSpecName: "config") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.616538 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39953208-1f17-4b4b-acb4-023c977192fa-tmp" (OuterVolumeSpecName: "tmp") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.616549 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-metrics" (OuterVolumeSpecName: "metrics") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.616642 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-sa-token" (OuterVolumeSpecName: "sa-token") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.617395 4900 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-entrypoint\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.617515 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.617614 4900 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/39953208-1f17-4b4b-acb4-023c977192fa-datadir\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.617700 4900 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-sa-token\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.617791 4900 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.617883 4900 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-metrics\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.618197 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/39953208-1f17-4b4b-acb4-023c977192fa-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.618289 4900 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/39953208-1f17-4b4b-acb4-023c977192fa-tmp\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.618615 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-kube-api-access-4lvm2" (OuterVolumeSpecName: "kube-api-access-4lvm2") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "kube-api-access-4lvm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.619293 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-token" (OuterVolumeSpecName: "collector-token") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "collector-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.620338 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "39953208-1f17-4b4b-acb4-023c977192fa" (UID: "39953208-1f17-4b4b-acb4-023c977192fa"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.720100 4900 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.720141 4900 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/39953208-1f17-4b4b-acb4-023c977192fa-collector-token\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:46 crc kubenswrapper[4900]: I0127 12:42:46.720159 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lvm2\" (UniqueName: \"kubernetes.io/projected/39953208-1f17-4b4b-acb4-023c977192fa-kube-api-access-4lvm2\") on node \"crc\" DevicePath \"\"" Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.503636 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-wc92p" Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.575602 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-wc92p"] Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.589345 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-wc92p"] Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.601342 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-b68pd"] Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.602965 4900 util.go:30] "No sandbox for pod can be found. 
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.606373 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-b68pd"]
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.606797 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.607083 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.607340 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.607764 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-69qvr"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.609487 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.615547 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.635767 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-entrypoint\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.635834 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-trusted-ca\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.635936 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-config\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.635975 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-collector-token\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.636020 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qcnm\" (UniqueName: \"kubernetes.io/projected/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-kube-api-access-5qcnm\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.636070 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-config-openshift-service-cacrt\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.636109 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-metrics\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.636134 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-tmp\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.636155 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-collector-syslog-receiver\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.636183 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-sa-token\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.636203 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-datadir\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738241 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-entrypoint\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738316 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-trusted-ca\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738362 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-config\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738394 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-collector-token\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738434 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qcnm\" (UniqueName: \"kubernetes.io/projected/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-kube-api-access-5qcnm\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738468 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-config-openshift-service-cacrt\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738516 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-metrics\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738569 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-tmp\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738597 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-collector-syslog-receiver\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738618 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-sa-token\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738649 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-datadir\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.738833 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-datadir\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.739760 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-entrypoint\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.739901 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-trusted-ca\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.740371 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-config-openshift-service-cacrt\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.740894 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-config\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.744684 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-tmp\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.745052 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-collector-token\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.747519 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-metrics\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.747970 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-collector-syslog-receiver\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.757514 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qcnm\" (UniqueName: \"kubernetes.io/projected/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-kube-api-access-5qcnm\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.763169 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/3124b61f-99eb-425d-a3c8-7d69f32e1dd0-sa-token\") pod \"collector-b68pd\" (UID: \"3124b61f-99eb-425d-a3c8-7d69f32e1dd0\") " pod="openshift-logging/collector-b68pd"
Jan 27 12:42:47 crc kubenswrapper[4900]: I0127 12:42:47.923357 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-b68pd"
Jan 27 12:42:48 crc kubenswrapper[4900]: I0127 12:42:48.430321 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-b68pd"]
Jan 27 12:42:48 crc kubenswrapper[4900]: I0127 12:42:48.496812 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39953208-1f17-4b4b-acb4-023c977192fa" path="/var/lib/kubelet/pods/39953208-1f17-4b4b-acb4-023c977192fa/volumes"
Jan 27 12:42:48 crc kubenswrapper[4900]: I0127 12:42:48.515195 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-b68pd" event={"ID":"3124b61f-99eb-425d-a3c8-7d69f32e1dd0","Type":"ContainerStarted","Data":"2d89c13b116b015493abefe347012f1d53dd393bc60f107d6c9566aa66f9bea9"}
Jan 27 12:42:52 crc kubenswrapper[4900]: I0127 12:42:52.372488 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:42:52 crc kubenswrapper[4900]: I0127 12:42:52.373175 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:42:52 crc kubenswrapper[4900]: I0127 12:42:52.373253 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x"
Jan 27 12:42:52 crc kubenswrapper[4900]: I0127 12:42:52.374254 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3985ee48c72db877d5168b811e894771c936712a44e12eed08928d84a04baff9"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 12:42:52 crc kubenswrapper[4900]: I0127 12:42:52.374328 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://3985ee48c72db877d5168b811e894771c936712a44e12eed08928d84a04baff9" gracePeriod=600
Jan 27 12:42:52 crc kubenswrapper[4900]: I0127 12:42:52.552468 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="3985ee48c72db877d5168b811e894771c936712a44e12eed08928d84a04baff9" exitCode=0
Jan 27 12:42:52 crc kubenswrapper[4900]: I0127 12:42:52.552494 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"3985ee48c72db877d5168b811e894771c936712a44e12eed08928d84a04baff9"}
Jan 27 12:42:52 crc kubenswrapper[4900]: I0127 12:42:52.552781 4900 scope.go:117] "RemoveContainer" containerID="13cb1cf2412c834432c235dfcd87c0da123068bd752d1f31f0e730cd7a97b24e"
Jan 27 12:42:55 crc kubenswrapper[4900]: I0127 12:42:55.579004 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-b68pd" event={"ID":"3124b61f-99eb-425d-a3c8-7d69f32e1dd0","Type":"ContainerStarted","Data":"a35153076f2feaa62a89ea59f703e751e07b18dda69d7645a6106f7b8973e279"}
Jan 27 12:42:55 crc kubenswrapper[4900]: I0127 12:42:55.584827 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"311b2446296261b95a4a8935d2688a7bfd4e27781be2ac18543d5aed5bad7b0a"}
Jan 27 12:42:55 crc kubenswrapper[4900]: I0127 12:42:55.610692 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-b68pd" podStartSLOduration=1.955729251 podStartE2EDuration="8.610656942s" podCreationTimestamp="2026-01-27 12:42:47 +0000 UTC" firstStartedPulling="2026-01-27 12:42:48.437905848 +0000 UTC m=+995.674934048" lastFinishedPulling="2026-01-27 12:42:55.092833519 +0000 UTC m=+1002.329861739" observedRunningTime="2026-01-27 12:42:55.605213284 +0000 UTC m=+1002.842241504" watchObservedRunningTime="2026-01-27 12:42:55.610656942 +0000 UTC m=+1002.847685152"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.018325 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"]
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.020950 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.025105 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.046116 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"]
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.219900 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.220720 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.220899 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzbks\" (UniqueName: \"kubernetes.io/projected/62bc015b-4274-4927-94d3-b5c2519e4a72-kube-api-access-nzbks\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.323273 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.323429 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzbks\" (UniqueName: \"kubernetes.io/projected/62bc015b-4274-4927-94d3-b5c2519e4a72-kube-api-access-nzbks\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.323536 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.324620 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.324685 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.352558 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzbks\" (UniqueName: \"kubernetes.io/projected/62bc015b-4274-4927-94d3-b5c2519e4a72-kube-api-access-nzbks\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.645017 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:31 crc kubenswrapper[4900]: I0127 12:43:31.904512 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"]
Jan 27 12:43:32 crc kubenswrapper[4900]: I0127 12:43:32.914665 4900 generic.go:334] "Generic (PLEG): container finished" podID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerID="0641b5fadebb3a7dd8cac508fce1579d1d69b123880540ba99539ba6ad9c5379" exitCode=0
Jan 27 12:43:32 crc kubenswrapper[4900]: I0127 12:43:32.914807 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr" event={"ID":"62bc015b-4274-4927-94d3-b5c2519e4a72","Type":"ContainerDied","Data":"0641b5fadebb3a7dd8cac508fce1579d1d69b123880540ba99539ba6ad9c5379"}
Jan 27 12:43:32 crc kubenswrapper[4900]: I0127 12:43:32.915155 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr" event={"ID":"62bc015b-4274-4927-94d3-b5c2519e4a72","Type":"ContainerStarted","Data":"8ed93e388fbb586a092539a45d87e64558a4937b26abd329bac07cc59b5c5217"}
Jan 27 12:43:32 crc kubenswrapper[4900]: I0127 12:43:32.917256 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 12:43:34 crc kubenswrapper[4900]: I0127 12:43:34.935610 4900 generic.go:334] "Generic (PLEG): container finished" podID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerID="e4ed4a8331c633f5ab8e2bdd1a88ba915b8e347e2638d366737c2cbc4df1e540" exitCode=0
Jan 27 12:43:34 crc kubenswrapper[4900]: I0127 12:43:34.935662 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr" event={"ID":"62bc015b-4274-4927-94d3-b5c2519e4a72","Type":"ContainerDied","Data":"e4ed4a8331c633f5ab8e2bdd1a88ba915b8e347e2638d366737c2cbc4df1e540"}
Jan 27 12:43:35 crc kubenswrapper[4900]: I0127 12:43:35.949149 4900 generic.go:334] "Generic (PLEG): container finished" podID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerID="ae85b0b675730c96ba9046f3acbd0c22cff0404e348f5dd670b3972bb82c9059" exitCode=0
Jan 27 12:43:35 crc kubenswrapper[4900]: I0127 12:43:35.949302 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr" event={"ID":"62bc015b-4274-4927-94d3-b5c2519e4a72","Type":"ContainerDied","Data":"ae85b0b675730c96ba9046f3acbd0c22cff0404e348f5dd670b3972bb82c9059"}
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.448937 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.581113 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-util\") pod \"62bc015b-4274-4927-94d3-b5c2519e4a72\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") "
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.581224 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-bundle\") pod \"62bc015b-4274-4927-94d3-b5c2519e4a72\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") "
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.581340 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzbks\" (UniqueName: \"kubernetes.io/projected/62bc015b-4274-4927-94d3-b5c2519e4a72-kube-api-access-nzbks\") pod \"62bc015b-4274-4927-94d3-b5c2519e4a72\" (UID: \"62bc015b-4274-4927-94d3-b5c2519e4a72\") "
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.583145 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-bundle" (OuterVolumeSpecName: "bundle") pod "62bc015b-4274-4927-94d3-b5c2519e4a72" (UID: "62bc015b-4274-4927-94d3-b5c2519e4a72"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.591585 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62bc015b-4274-4927-94d3-b5c2519e4a72-kube-api-access-nzbks" (OuterVolumeSpecName: "kube-api-access-nzbks") pod "62bc015b-4274-4927-94d3-b5c2519e4a72" (UID: "62bc015b-4274-4927-94d3-b5c2519e4a72"). InnerVolumeSpecName "kube-api-access-nzbks". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.683403 4900 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.683465 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzbks\" (UniqueName: \"kubernetes.io/projected/62bc015b-4274-4927-94d3-b5c2519e4a72-kube-api-access-nzbks\") on node \"crc\" DevicePath \"\""
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.895693 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-util" (OuterVolumeSpecName: "util") pod "62bc015b-4274-4927-94d3-b5c2519e4a72" (UID: "62bc015b-4274-4927-94d3-b5c2519e4a72"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:43:37 crc kubenswrapper[4900]: I0127 12:43:37.989820 4900 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62bc015b-4274-4927-94d3-b5c2519e4a72-util\") on node \"crc\" DevicePath \"\""
Jan 27 12:43:38 crc kubenswrapper[4900]: I0127 12:43:38.060288 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr"
Jan 27 12:43:38 crc kubenswrapper[4900]: I0127 12:43:38.060300 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr" event={"ID":"62bc015b-4274-4927-94d3-b5c2519e4a72","Type":"ContainerDied","Data":"8ed93e388fbb586a092539a45d87e64558a4937b26abd329bac07cc59b5c5217"}
Jan 27 12:43:38 crc kubenswrapper[4900]: I0127 12:43:38.060495 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ed93e388fbb586a092539a45d87e64558a4937b26abd329bac07cc59b5c5217"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.822100 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-b9gpn"]
Jan 27 12:43:42 crc kubenswrapper[4900]: E0127 12:43:42.823668 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerName="util"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.823735 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerName="util"
Jan 27 12:43:42 crc kubenswrapper[4900]: E0127 12:43:42.823801 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerName="pull"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.823810 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerName="pull"
Jan 27 12:43:42 crc kubenswrapper[4900]: E0127 12:43:42.823831 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerName="extract"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.823840 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerName="extract"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.824290 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="62bc015b-4274-4927-94d3-b5c2519e4a72" containerName="extract"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.826330 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-b9gpn"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.829608 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.830345 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-6zzwk"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.833750 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.836435 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-b9gpn"]
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.860554 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v52v5\" (UniqueName: \"kubernetes.io/projected/e0e01a34-8c1e-46a7-b427-7cbe320855fe-kube-api-access-v52v5\") pod \"nmstate-operator-646758c888-b9gpn\" (UID: \"e0e01a34-8c1e-46a7-b427-7cbe320855fe\") " pod="openshift-nmstate/nmstate-operator-646758c888-b9gpn"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.961968 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v52v5\" (UniqueName: \"kubernetes.io/projected/e0e01a34-8c1e-46a7-b427-7cbe320855fe-kube-api-access-v52v5\") pod \"nmstate-operator-646758c888-b9gpn\" (UID: \"e0e01a34-8c1e-46a7-b427-7cbe320855fe\") " pod="openshift-nmstate/nmstate-operator-646758c888-b9gpn"
Jan 27 12:43:42 crc kubenswrapper[4900]: I0127 12:43:42.993007 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v52v5\" (UniqueName: \"kubernetes.io/projected/e0e01a34-8c1e-46a7-b427-7cbe320855fe-kube-api-access-v52v5\") pod \"nmstate-operator-646758c888-b9gpn\" (UID: \"e0e01a34-8c1e-46a7-b427-7cbe320855fe\") " pod="openshift-nmstate/nmstate-operator-646758c888-b9gpn"
Jan 27 12:43:43 crc kubenswrapper[4900]: I0127 12:43:43.149207 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-b9gpn"
Jan 27 12:43:43 crc kubenswrapper[4900]: I0127 12:43:43.689751 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-b9gpn"]
Jan 27 12:43:44 crc kubenswrapper[4900]: I0127 12:43:44.416349 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-b9gpn" event={"ID":"e0e01a34-8c1e-46a7-b427-7cbe320855fe","Type":"ContainerStarted","Data":"95bdf58c6662973d4278cfc54d930354bde240b2ccb5c8736b0bd9699b8fd84c"}
Jan 27 12:43:47 crc kubenswrapper[4900]: I0127 12:43:47.456696 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-b9gpn" event={"ID":"e0e01a34-8c1e-46a7-b427-7cbe320855fe","Type":"ContainerStarted","Data":"63f0de7812d8aece9b41b8856d4b0d613f7d932e8c47804322b090ba599ed144"}
Jan 27 12:43:47 crc kubenswrapper[4900]: I0127 12:43:47.489490 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-b9gpn" podStartSLOduration=2.59201288 podStartE2EDuration="5.489437758s" podCreationTimestamp="2026-01-27 12:43:42 +0000 UTC" firstStartedPulling="2026-01-27 12:43:43.713092684 +0000 UTC m=+1050.950120894" lastFinishedPulling="2026-01-27 12:43:46.610517562 +0000 UTC m=+1053.847545772" observedRunningTime="2026-01-27 12:43:47.483684531 +0000 UTC m=+1054.720712751" watchObservedRunningTime="2026-01-27 12:43:47.489437758 +0000 UTC m=+1054.726465968"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.702332 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-cd2qx"]
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.712234 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.718312 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-wtql2"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.751102 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"]
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.753451 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.758749 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.789680 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-wgnlg"]
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.792443 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.806765 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"]
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.892956 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b1058137-9f30-4107-a5a2-1a2edf16cbce-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-82kkf\" (UID: \"b1058137-9f30-4107-a5a2-1a2edf16cbce\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.893484 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gjwb\" (UniqueName: \"kubernetes.io/projected/e2425ef6-315b-4e1c-8004-27599038a670-kube-api-access-6gjwb\") pod \"nmstate-metrics-54757c584b-cd2qx\" (UID: \"e2425ef6-315b-4e1c-8004-27599038a670\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.893704 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-ovs-socket\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.893959 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-dbus-socket\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.894160 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkdh8\" (UniqueName: \"kubernetes.io/projected/b1058137-9f30-4107-a5a2-1a2edf16cbce-kube-api-access-zkdh8\") pod \"nmstate-webhook-8474b5b9d8-82kkf\" (UID: \"b1058137-9f30-4107-a5a2-1a2edf16cbce\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.894461 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-nmstate-lock\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.902125 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnpbc\" (UniqueName: \"kubernetes.io/projected/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-kube-api-access-gnpbc\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:51 crc kubenswrapper[4900]: I0127 12:43:51.895132 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-cd2qx"]
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.170098 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkdh8\" (UniqueName: \"kubernetes.io/projected/b1058137-9f30-4107-a5a2-1a2edf16cbce-kube-api-access-zkdh8\") pod \"nmstate-webhook-8474b5b9d8-82kkf\" (UID: \"b1058137-9f30-4107-a5a2-1a2edf16cbce\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.170439 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-nmstate-lock\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.170511 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnpbc\" (UniqueName: \"kubernetes.io/projected/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-kube-api-access-gnpbc\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.170644 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b1058137-9f30-4107-a5a2-1a2edf16cbce-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-82kkf\" (UID: \"b1058137-9f30-4107-a5a2-1a2edf16cbce\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.170736 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gjwb\" (UniqueName: \"kubernetes.io/projected/e2425ef6-315b-4e1c-8004-27599038a670-kube-api-access-6gjwb\") pod \"nmstate-metrics-54757c584b-cd2qx\" (UID: \"e2425ef6-315b-4e1c-8004-27599038a670\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.170909 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-ovs-socket\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.170943 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-dbus-socket\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.171368 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-dbus-socket\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.171592 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-nmstate-lock\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: E0127 12:43:52.171627 4900 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.171991 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-ovs-socket\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: E0127 12:43:52.172217 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b1058137-9f30-4107-a5a2-1a2edf16cbce-tls-key-pair podName:b1058137-9f30-4107-a5a2-1a2edf16cbce nodeName:}" failed. No retries permitted until 2026-01-27 12:43:52.671975031 +0000 UTC m=+1059.909003241 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/b1058137-9f30-4107-a5a2-1a2edf16cbce-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-82kkf" (UID: "b1058137-9f30-4107-a5a2-1a2edf16cbce") : secret "openshift-nmstate-webhook" not found
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.195567 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkdh8\" (UniqueName: \"kubernetes.io/projected/b1058137-9f30-4107-a5a2-1a2edf16cbce-kube-api-access-zkdh8\") pod \"nmstate-webhook-8474b5b9d8-82kkf\" (UID: \"b1058137-9f30-4107-a5a2-1a2edf16cbce\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.218122 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnpbc\" (UniqueName: \"kubernetes.io/projected/e78c9b0a-3d34-4f6c-9c65-0ae63482fff7-kube-api-access-gnpbc\") pod \"nmstate-handler-wgnlg\" (UID: \"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7\") " pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.221069 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gjwb\" (UniqueName: \"kubernetes.io/projected/e2425ef6-315b-4e1c-8004-27599038a670-kube-api-access-6gjwb\") pod \"nmstate-metrics-54757c584b-cd2qx\" (UID: \"e2425ef6-315b-4e1c-8004-27599038a670\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.248683 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"]
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.250488 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.264247 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.264270 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.264647 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-46gdk"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.267412 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"]
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.272501 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.272569 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.272678 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg9z4\" (UniqueName: \"kubernetes.io/projected/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-kube-api-access-pg9z4\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.331969 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.374243 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: E0127 12:43:52.375087 4900 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found
Jan 27 12:43:52 crc kubenswrapper[4900]: E0127 12:43:52.375405 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-plugin-serving-cert podName:c4f31091-fba1-4fcb-9dd8-929b07b8fc42 nodeName:}" failed. No retries permitted until 2026-01-27 12:43:52.875341335 +0000 UTC m=+1060.112369545 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-mrgks" (UID: "c4f31091-fba1-4fcb-9dd8-929b07b8fc42") : secret "plugin-serving-cert" not found
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.375167 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.376112 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg9z4\" (UniqueName: \"kubernetes.io/projected/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-kube-api-access-pg9z4\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.376813 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.408244 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pg9z4\" (UniqueName: \"kubernetes.io/projected/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-kube-api-access-pg9z4\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.580161 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-wgnlg"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.699822 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b1058137-9f30-4107-a5a2-1a2edf16cbce-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-82kkf\" (UID: \"b1058137-9f30-4107-a5a2-1a2edf16cbce\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.730052 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5d4d5db777-x4428"]
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.731436 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b1058137-9f30-4107-a5a2-1a2edf16cbce-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-82kkf\" (UID: \"b1058137-9f30-4107-a5a2-1a2edf16cbce\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.738614 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5d4d5db777-x4428"]
Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.740901 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5d4d5db777-x4428"
Need to start a new one" pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.971253 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.979283 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-serving-cert\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.986323 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkf8q\" (UniqueName: \"kubernetes.io/projected/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-kube-api-access-lkf8q\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.986851 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.994116 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-trusted-ca-bundle\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.994311 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-service-ca\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.994415 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-config\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.994517 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-oauth-serving-cert\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:52 crc kubenswrapper[4900]: I0127 12:43:52.994589 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-oauth-config\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " 
pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:52.999292 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4f31091-fba1-4fcb-9dd8-929b07b8fc42-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-mrgks\" (UID: \"c4f31091-fba1-4fcb-9dd8-929b07b8fc42\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.097525 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-trusted-ca-bundle\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.097595 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-service-ca\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.097633 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-config\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.097664 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-oauth-serving-cert\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.097694 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-oauth-config\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.097780 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-serving-cert\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.097820 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkf8q\" (UniqueName: \"kubernetes.io/projected/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-kube-api-access-lkf8q\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.099340 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-trusted-ca-bundle\") pod \"console-5d4d5db777-x4428\" (UID: 
\"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.099369 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-service-ca\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.099627 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-oauth-serving-cert\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.100326 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-config\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.105124 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-serving-cert\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.106819 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-oauth-config\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.132206 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkf8q\" (UniqueName: \"kubernetes.io/projected/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-kube-api-access-lkf8q\") pod \"console-5d4d5db777-x4428\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.177710 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.333956 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.398835 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-cd2qx"] Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.709396 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-wgnlg" event={"ID":"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7","Type":"ContainerStarted","Data":"08ffb1b0be0ab0fa1222db360b4494ccd9584ce7de227e06b0b8c14746262a7d"} Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.716640 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx" event={"ID":"e2425ef6-315b-4e1c-8004-27599038a670","Type":"ContainerStarted","Data":"f46dee9aaf0ea0d57696700d88d8245860e1cb5d2427ecf3b59bd015de7b7856"} Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.897774 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf"] Jan 27 12:43:53 crc kubenswrapper[4900]: I0127 12:43:53.964644 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks"] Jan 27 12:43:53 crc kubenswrapper[4900]: W0127 12:43:53.971347 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4f31091_fba1_4fcb_9dd8_929b07b8fc42.slice/crio-fe8cb49a83fd26a53201b85c4fcd571621e228c4f291084edad78bde06e39969 WatchSource:0}: Error finding container fe8cb49a83fd26a53201b85c4fcd571621e228c4f291084edad78bde06e39969: Status 404 returned error can't find the container with id fe8cb49a83fd26a53201b85c4fcd571621e228c4f291084edad78bde06e39969 Jan 27 12:43:54 crc kubenswrapper[4900]: I0127 12:43:54.013024 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5d4d5db777-x4428"] Jan 27 12:43:54 crc kubenswrapper[4900]: W0127 12:43:54.020854 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0bc8176c_abeb_4ac1_90dc_d0c26939e6c3.slice/crio-1456d6b33ccd6b32c741a3317e11f6175ce550a21e7b5db9219f842b3cf19051 WatchSource:0}: Error finding container 1456d6b33ccd6b32c741a3317e11f6175ce550a21e7b5db9219f842b3cf19051: Status 404 returned error can't find the container with id 1456d6b33ccd6b32c741a3317e11f6175ce550a21e7b5db9219f842b3cf19051 Jan 27 12:43:54 crc kubenswrapper[4900]: I0127 12:43:54.730934 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks" event={"ID":"c4f31091-fba1-4fcb-9dd8-929b07b8fc42","Type":"ContainerStarted","Data":"fe8cb49a83fd26a53201b85c4fcd571621e228c4f291084edad78bde06e39969"} Jan 27 12:43:54 crc kubenswrapper[4900]: I0127 12:43:54.734596 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5d4d5db777-x4428" event={"ID":"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3","Type":"ContainerStarted","Data":"a126780cd6d5ae4eb121559d736cce47c465cf3ca3baff86838c544747a47a0e"} Jan 27 12:43:54 crc kubenswrapper[4900]: I0127 12:43:54.734680 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5d4d5db777-x4428" event={"ID":"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3","Type":"ContainerStarted","Data":"1456d6b33ccd6b32c741a3317e11f6175ce550a21e7b5db9219f842b3cf19051"} Jan 27 12:43:54 crc kubenswrapper[4900]: I0127 12:43:54.736934 4900 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" event={"ID":"b1058137-9f30-4107-a5a2-1a2edf16cbce","Type":"ContainerStarted","Data":"07cc751a60782a10767dd0f563c64e528cfd3cf4ffdad3f1136c4561e8b6b87d"} Jan 27 12:43:54 crc kubenswrapper[4900]: I0127 12:43:54.759520 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5d4d5db777-x4428" podStartSLOduration=2.759460329 podStartE2EDuration="2.759460329s" podCreationTimestamp="2026-01-27 12:43:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:43:54.758810621 +0000 UTC m=+1061.995838841" watchObservedRunningTime="2026-01-27 12:43:54.759460329 +0000 UTC m=+1061.996488549" Jan 27 12:43:56 crc kubenswrapper[4900]: I0127 12:43:56.755898 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-wgnlg" event={"ID":"e78c9b0a-3d34-4f6c-9c65-0ae63482fff7","Type":"ContainerStarted","Data":"46389c467567bc0376f0404fee86b810ea5ea65cadd12fba14f44cbe4403db21"} Jan 27 12:43:56 crc kubenswrapper[4900]: I0127 12:43:56.756678 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-wgnlg" Jan 27 12:43:56 crc kubenswrapper[4900]: I0127 12:43:56.769110 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" event={"ID":"b1058137-9f30-4107-a5a2-1a2edf16cbce","Type":"ContainerStarted","Data":"2b2f6861f22ab6f5adb6243212ace4b1bcb71d8aec8b68b146239fa1693c37e4"} Jan 27 12:43:56 crc kubenswrapper[4900]: I0127 12:43:56.769194 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" Jan 27 12:43:56 crc kubenswrapper[4900]: I0127 12:43:56.772338 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx" event={"ID":"e2425ef6-315b-4e1c-8004-27599038a670","Type":"ContainerStarted","Data":"b31cefd843379b9d3455e1f429a50c7580a5d0e086ccf69889c26ad4567e42b8"} Jan 27 12:43:56 crc kubenswrapper[4900]: I0127 12:43:56.786017 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-wgnlg" podStartSLOduration=2.523978632 podStartE2EDuration="5.785982473s" podCreationTimestamp="2026-01-27 12:43:51 +0000 UTC" firstStartedPulling="2026-01-27 12:43:52.70584424 +0000 UTC m=+1059.942872450" lastFinishedPulling="2026-01-27 12:43:55.967848081 +0000 UTC m=+1063.204876291" observedRunningTime="2026-01-27 12:43:56.783544933 +0000 UTC m=+1064.020573143" watchObservedRunningTime="2026-01-27 12:43:56.785982473 +0000 UTC m=+1064.023010683" Jan 27 12:43:56 crc kubenswrapper[4900]: I0127 12:43:56.817025 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" podStartSLOduration=3.772914751 podStartE2EDuration="5.816991414s" podCreationTimestamp="2026-01-27 12:43:51 +0000 UTC" firstStartedPulling="2026-01-27 12:43:53.928184887 +0000 UTC m=+1061.165213127" lastFinishedPulling="2026-01-27 12:43:55.97226158 +0000 UTC m=+1063.209289790" observedRunningTime="2026-01-27 12:43:56.805620384 +0000 UTC m=+1064.042648604" watchObservedRunningTime="2026-01-27 12:43:56.816991414 +0000 UTC m=+1064.054019614" Jan 27 12:43:57 crc kubenswrapper[4900]: I0127 12:43:57.784847 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks" event={"ID":"c4f31091-fba1-4fcb-9dd8-929b07b8fc42","Type":"ContainerStarted","Data":"964c64d094a3b6429949beaa01cb2b0f036bc43d9e30ab31d48f62f3f143a093"} Jan 27 12:43:57 crc kubenswrapper[4900]: I0127 12:43:57.811764 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-mrgks" podStartSLOduration=2.5491951950000002 podStartE2EDuration="5.811725462s" podCreationTimestamp="2026-01-27 12:43:52 +0000 UTC" firstStartedPulling="2026-01-27 12:43:53.974280645 +0000 UTC m=+1061.211308855" lastFinishedPulling="2026-01-27 12:43:57.236810912 +0000 UTC m=+1064.473839122" observedRunningTime="2026-01-27 12:43:57.799925599 +0000 UTC m=+1065.036953809" watchObservedRunningTime="2026-01-27 12:43:57.811725462 +0000 UTC m=+1065.048753672" Jan 27 12:43:59 crc kubenswrapper[4900]: I0127 12:43:59.809289 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx" event={"ID":"e2425ef6-315b-4e1c-8004-27599038a670","Type":"ContainerStarted","Data":"8f5f2912a86dd214b51dde3fc38ef07c9237c95a7e596763da790bf33da1b8d4"} Jan 27 12:43:59 crc kubenswrapper[4900]: I0127 12:43:59.831267 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-cd2qx" podStartSLOduration=3.334570645 podStartE2EDuration="8.831237992s" podCreationTimestamp="2026-01-27 12:43:51 +0000 UTC" firstStartedPulling="2026-01-27 12:43:53.465322019 +0000 UTC m=+1060.702350229" lastFinishedPulling="2026-01-27 12:43:58.961989376 +0000 UTC m=+1066.199017576" observedRunningTime="2026-01-27 12:43:59.829231894 +0000 UTC m=+1067.066260124" watchObservedRunningTime="2026-01-27 12:43:59.831237992 +0000 UTC m=+1067.068266202" Jan 27 12:44:02 crc kubenswrapper[4900]: I0127 12:44:02.612105 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-wgnlg" Jan 27 12:44:03 crc kubenswrapper[4900]: I0127 12:44:03.334425 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:44:03 crc kubenswrapper[4900]: I0127 12:44:03.334589 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:44:03 crc kubenswrapper[4900]: I0127 12:44:03.348147 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:44:03 crc kubenswrapper[4900]: I0127 12:44:03.860533 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:44:03 crc kubenswrapper[4900]: I0127 12:44:03.930857 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-55dbd56b55-fpf9f"] Jan 27 12:44:12 crc kubenswrapper[4900]: I0127 12:44:12.981167 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" Jan 27 12:44:28 crc kubenswrapper[4900]: I0127 12:44:28.991686 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-55dbd56b55-fpf9f" podUID="06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" containerName="console" containerID="cri-o://c371a6a229b6563fbfb2747d61304f2c7da956984a4aee5b83e3bd141e92cb96" gracePeriod=15 Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.271564 4900 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-55dbd56b55-fpf9f_06dc74ed-ec3f-4629-93ee-c8e6ee076b5b/console/0.log" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.271881 4900 generic.go:334] "Generic (PLEG): container finished" podID="06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" containerID="c371a6a229b6563fbfb2747d61304f2c7da956984a4aee5b83e3bd141e92cb96" exitCode=2 Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.271943 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-55dbd56b55-fpf9f" event={"ID":"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b","Type":"ContainerDied","Data":"c371a6a229b6563fbfb2747d61304f2c7da956984a4aee5b83e3bd141e92cb96"} Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.444044 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-55dbd56b55-fpf9f_06dc74ed-ec3f-4629-93ee-c8e6ee076b5b/console/0.log" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.444671 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-55dbd56b55-fpf9f" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.614699 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-trusted-ca-bundle\") pod \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.614789 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-config\") pod \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.614822 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-oauth-config\") pod \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.614979 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-879nn\" (UniqueName: \"kubernetes.io/projected/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-kube-api-access-879nn\") pod \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.615036 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-oauth-serving-cert\") pod \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.615115 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-service-ca\") pod \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.615171 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-serving-cert\") pod \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\" (UID: \"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b\") " Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.616134 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" (UID: "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.616279 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-service-ca" (OuterVolumeSpecName: "service-ca") pod "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" (UID: "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.616156 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" (UID: "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.616219 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-config" (OuterVolumeSpecName: "console-config") pod "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" (UID: "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.624351 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-kube-api-access-879nn" (OuterVolumeSpecName: "kube-api-access-879nn") pod "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" (UID: "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b"). InnerVolumeSpecName "kube-api-access-879nn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.624330 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" (UID: "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.624540 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" (UID: "06dc74ed-ec3f-4629-93ee-c8e6ee076b5b"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.722816 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-879nn\" (UniqueName: \"kubernetes.io/projected/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-kube-api-access-879nn\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.724369 4900 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.724397 4900 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.724469 4900 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.724486 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.724502 4900 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:29 crc kubenswrapper[4900]: I0127 12:44:29.724515 4900 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:30 crc kubenswrapper[4900]: I0127 12:44:30.297458 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-55dbd56b55-fpf9f_06dc74ed-ec3f-4629-93ee-c8e6ee076b5b/console/0.log" Jan 27 12:44:30 crc kubenswrapper[4900]: I0127 12:44:30.297917 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-55dbd56b55-fpf9f" event={"ID":"06dc74ed-ec3f-4629-93ee-c8e6ee076b5b","Type":"ContainerDied","Data":"e270133a38d34981a4bab8125836663a7664a2106742cc1ff6f55cf5958a17a2"} Jan 27 12:44:30 crc kubenswrapper[4900]: I0127 12:44:30.298047 4900 scope.go:117] "RemoveContainer" containerID="c371a6a229b6563fbfb2747d61304f2c7da956984a4aee5b83e3bd141e92cb96" Jan 27 12:44:30 crc kubenswrapper[4900]: I0127 12:44:30.298540 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-55dbd56b55-fpf9f" Jan 27 12:44:30 crc kubenswrapper[4900]: I0127 12:44:30.388782 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-55dbd56b55-fpf9f"] Jan 27 12:44:30 crc kubenswrapper[4900]: I0127 12:44:30.395415 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-55dbd56b55-fpf9f"] Jan 27 12:44:30 crc kubenswrapper[4900]: E0127 12:44:30.471470 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06dc74ed_ec3f_4629_93ee_c8e6ee076b5b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06dc74ed_ec3f_4629_93ee_c8e6ee076b5b.slice/crio-e270133a38d34981a4bab8125836663a7664a2106742cc1ff6f55cf5958a17a2\": RecentStats: unable to find data in memory cache]" Jan 27 12:44:30 crc kubenswrapper[4900]: I0127 12:44:30.499768 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" path="/var/lib/kubelet/pods/06dc74ed-ec3f-4629-93ee-c8e6ee076b5b/volumes" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.642997 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t"] Jan 27 12:44:33 crc kubenswrapper[4900]: E0127 12:44:33.644506 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" containerName="console" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.644541 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" containerName="console" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.644837 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="06dc74ed-ec3f-4629-93ee-c8e6ee076b5b" containerName="console" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.649896 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.653889 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.667670 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t"] Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.826390 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.826849 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.827283 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz9cd\" (UniqueName: \"kubernetes.io/projected/4c53630b-90cf-4691-9746-161271db745f-kube-api-access-zz9cd\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.929149 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.929586 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz9cd\" (UniqueName: \"kubernetes.io/projected/4c53630b-90cf-4691-9746-161271db745f-kube-api-access-zz9cd\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.929684 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.929837 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.930321 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.966435 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz9cd\" (UniqueName: \"kubernetes.io/projected/4c53630b-90cf-4691-9746-161271db745f-kube-api-access-zz9cd\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:33 crc kubenswrapper[4900]: I0127 12:44:33.997108 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:34 crc kubenswrapper[4900]: I0127 12:44:34.361497 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t"] Jan 27 12:44:34 crc kubenswrapper[4900]: I0127 12:44:34.650303 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" event={"ID":"4c53630b-90cf-4691-9746-161271db745f","Type":"ContainerStarted","Data":"df934684b61b767220669b8f7623867a01fbc18b418c0e6cf43c3323727a496e"} Jan 27 12:44:34 crc kubenswrapper[4900]: I0127 12:44:34.650379 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" event={"ID":"4c53630b-90cf-4691-9746-161271db745f","Type":"ContainerStarted","Data":"777ef71c72ecb0fc3f4a8fa563e1bd1da998a741c46be97839efd7bcf0a428e4"} Jan 27 12:44:35 crc kubenswrapper[4900]: I0127 12:44:35.661089 4900 generic.go:334] "Generic (PLEG): container finished" podID="4c53630b-90cf-4691-9746-161271db745f" containerID="df934684b61b767220669b8f7623867a01fbc18b418c0e6cf43c3323727a496e" exitCode=0 Jan 27 12:44:35 crc kubenswrapper[4900]: I0127 12:44:35.661204 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" event={"ID":"4c53630b-90cf-4691-9746-161271db745f","Type":"ContainerDied","Data":"df934684b61b767220669b8f7623867a01fbc18b418c0e6cf43c3323727a496e"} Jan 27 12:44:37 crc kubenswrapper[4900]: I0127 12:44:37.679330 4900 generic.go:334] "Generic (PLEG): container finished" podID="4c53630b-90cf-4691-9746-161271db745f" containerID="7e13b3b4ccc03f5583f91e0b86c5207f21a30f0e524cf4e942038a6e6494c297" exitCode=0 Jan 27 12:44:37 crc kubenswrapper[4900]: I0127 12:44:37.679384 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" 
event={"ID":"4c53630b-90cf-4691-9746-161271db745f","Type":"ContainerDied","Data":"7e13b3b4ccc03f5583f91e0b86c5207f21a30f0e524cf4e942038a6e6494c297"} Jan 27 12:44:38 crc kubenswrapper[4900]: I0127 12:44:38.689810 4900 generic.go:334] "Generic (PLEG): container finished" podID="4c53630b-90cf-4691-9746-161271db745f" containerID="834dfa17be367221d88f9d04c8c3e69a373e2ec7a064a37e7d6e428e15b4a4f2" exitCode=0 Jan 27 12:44:38 crc kubenswrapper[4900]: I0127 12:44:38.689878 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" event={"ID":"4c53630b-90cf-4691-9746-161271db745f","Type":"ContainerDied","Data":"834dfa17be367221d88f9d04c8c3e69a373e2ec7a064a37e7d6e428e15b4a4f2"} Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.000622 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.181657 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-bundle\") pod \"4c53630b-90cf-4691-9746-161271db745f\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.182076 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zz9cd\" (UniqueName: \"kubernetes.io/projected/4c53630b-90cf-4691-9746-161271db745f-kube-api-access-zz9cd\") pod \"4c53630b-90cf-4691-9746-161271db745f\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.182174 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-util\") pod \"4c53630b-90cf-4691-9746-161271db745f\" (UID: \"4c53630b-90cf-4691-9746-161271db745f\") " Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.183011 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-bundle" (OuterVolumeSpecName: "bundle") pod "4c53630b-90cf-4691-9746-161271db745f" (UID: "4c53630b-90cf-4691-9746-161271db745f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.193505 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c53630b-90cf-4691-9746-161271db745f-kube-api-access-zz9cd" (OuterVolumeSpecName: "kube-api-access-zz9cd") pod "4c53630b-90cf-4691-9746-161271db745f" (UID: "4c53630b-90cf-4691-9746-161271db745f"). InnerVolumeSpecName "kube-api-access-zz9cd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.284381 4900 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.284425 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zz9cd\" (UniqueName: \"kubernetes.io/projected/4c53630b-90cf-4691-9746-161271db745f-kube-api-access-zz9cd\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.330577 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-util" (OuterVolumeSpecName: "util") pod "4c53630b-90cf-4691-9746-161271db745f" (UID: "4c53630b-90cf-4691-9746-161271db745f"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.385913 4900 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c53630b-90cf-4691-9746-161271db745f-util\") on node \"crc\" DevicePath \"\"" Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.735009 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.734959 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t" event={"ID":"4c53630b-90cf-4691-9746-161271db745f","Type":"ContainerDied","Data":"777ef71c72ecb0fc3f4a8fa563e1bd1da998a741c46be97839efd7bcf0a428e4"} Jan 27 12:44:40 crc kubenswrapper[4900]: I0127 12:44:40.735099 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="777ef71c72ecb0fc3f4a8fa563e1bd1da998a741c46be97839efd7bcf0a428e4" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.264279 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-694495f969-v6psl"] Jan 27 12:44:48 crc kubenswrapper[4900]: E0127 12:44:48.265791 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c53630b-90cf-4691-9746-161271db745f" containerName="util" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.265816 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c53630b-90cf-4691-9746-161271db745f" containerName="util" Jan 27 12:44:48 crc kubenswrapper[4900]: E0127 12:44:48.265857 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c53630b-90cf-4691-9746-161271db745f" containerName="extract" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.265867 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c53630b-90cf-4691-9746-161271db745f" containerName="extract" Jan 27 12:44:48 crc kubenswrapper[4900]: E0127 12:44:48.265893 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c53630b-90cf-4691-9746-161271db745f" containerName="pull" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.265903 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c53630b-90cf-4691-9746-161271db745f" containerName="pull" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.266113 4900 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="4c53630b-90cf-4691-9746-161271db745f" containerName="extract" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.266994 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.271432 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.271986 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.272289 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.277914 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.278343 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-9b992" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.290087 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-694495f969-v6psl"] Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.303630 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wl9h\" (UniqueName: \"kubernetes.io/projected/88d8052f-1988-4229-abc5-100335ed01e2-kube-api-access-4wl9h\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: \"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.304244 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88d8052f-1988-4229-abc5-100335ed01e2-apiservice-cert\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: \"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.304303 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88d8052f-1988-4229-abc5-100335ed01e2-webhook-cert\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: \"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.422615 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88d8052f-1988-4229-abc5-100335ed01e2-apiservice-cert\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: \"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.422694 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88d8052f-1988-4229-abc5-100335ed01e2-webhook-cert\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: 
\"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.422779 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wl9h\" (UniqueName: \"kubernetes.io/projected/88d8052f-1988-4229-abc5-100335ed01e2-kube-api-access-4wl9h\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: \"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.437121 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88d8052f-1988-4229-abc5-100335ed01e2-webhook-cert\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: \"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.437677 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88d8052f-1988-4229-abc5-100335ed01e2-apiservice-cert\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: \"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.459037 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wl9h\" (UniqueName: \"kubernetes.io/projected/88d8052f-1988-4229-abc5-100335ed01e2-kube-api-access-4wl9h\") pod \"metallb-operator-controller-manager-694495f969-v6psl\" (UID: \"88d8052f-1988-4229-abc5-100335ed01e2\") " pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.587822 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-844499dc88-f72ld"] Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.589530 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.592620 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.592620 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.593216 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-zhbhm" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.597794 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.617029 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-844499dc88-f72ld"] Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.729339 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0d203d65-c23c-4e25-b72b-7b5a69441b5f-webhook-cert\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.729817 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0d203d65-c23c-4e25-b72b-7b5a69441b5f-apiservice-cert\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.730050 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vv8k\" (UniqueName: \"kubernetes.io/projected/0d203d65-c23c-4e25-b72b-7b5a69441b5f-kube-api-access-5vv8k\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.831948 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0d203d65-c23c-4e25-b72b-7b5a69441b5f-webhook-cert\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.832173 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0d203d65-c23c-4e25-b72b-7b5a69441b5f-apiservice-cert\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.832229 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vv8k\" (UniqueName: \"kubernetes.io/projected/0d203d65-c23c-4e25-b72b-7b5a69441b5f-kube-api-access-5vv8k\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.841841 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0d203d65-c23c-4e25-b72b-7b5a69441b5f-webhook-cert\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.848854 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/0d203d65-c23c-4e25-b72b-7b5a69441b5f-apiservice-cert\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.854282 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vv8k\" (UniqueName: \"kubernetes.io/projected/0d203d65-c23c-4e25-b72b-7b5a69441b5f-kube-api-access-5vv8k\") pod \"metallb-operator-webhook-server-844499dc88-f72ld\" (UID: \"0d203d65-c23c-4e25-b72b-7b5a69441b5f\") " pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:48 crc kubenswrapper[4900]: I0127 12:44:48.910783 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:49 crc kubenswrapper[4900]: I0127 12:44:49.222537 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-694495f969-v6psl"] Jan 27 12:44:49 crc kubenswrapper[4900]: I0127 12:44:49.434505 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-844499dc88-f72ld"] Jan 27 12:44:49 crc kubenswrapper[4900]: W0127 12:44:49.448389 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d203d65_c23c_4e25_b72b_7b5a69441b5f.slice/crio-f9457adedef0f7bb4f6c39c0111724e1e19e01e0ad82476e03b822fa45dc751e WatchSource:0}: Error finding container f9457adedef0f7bb4f6c39c0111724e1e19e01e0ad82476e03b822fa45dc751e: Status 404 returned error can't find the container with id f9457adedef0f7bb4f6c39c0111724e1e19e01e0ad82476e03b822fa45dc751e Jan 27 12:44:49 crc kubenswrapper[4900]: I0127 12:44:49.846979 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" event={"ID":"88d8052f-1988-4229-abc5-100335ed01e2","Type":"ContainerStarted","Data":"e9130a0b72672e2e98331d446d82cae226f662b5979b01b279850e80dc33d2f7"} Jan 27 12:44:49 crc kubenswrapper[4900]: I0127 12:44:49.849033 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" event={"ID":"0d203d65-c23c-4e25-b72b-7b5a69441b5f","Type":"ContainerStarted","Data":"f9457adedef0f7bb4f6c39c0111724e1e19e01e0ad82476e03b822fa45dc751e"} Jan 27 12:44:56 crc kubenswrapper[4900]: I0127 12:44:56.934372 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" event={"ID":"88d8052f-1988-4229-abc5-100335ed01e2","Type":"ContainerStarted","Data":"82cee18623fafef839bdda5d19a62285c8f3ea7de0aae258e50583ac2a70be0b"} Jan 27 12:44:56 crc kubenswrapper[4900]: I0127 12:44:56.937003 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:44:56 crc kubenswrapper[4900]: I0127 12:44:56.937660 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" event={"ID":"0d203d65-c23c-4e25-b72b-7b5a69441b5f","Type":"ContainerStarted","Data":"c9aaccf51243d1ad8d169bde83a0f99376ea5cf45a09ecc63fad8e379a6a51e5"} Jan 27 12:44:56 crc kubenswrapper[4900]: I0127 12:44:56.937832 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:44:56 crc kubenswrapper[4900]: I0127 12:44:56.989781 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" podStartSLOduration=2.4042387610000002 podStartE2EDuration="8.989711072s" podCreationTimestamp="2026-01-27 12:44:48 +0000 UTC" firstStartedPulling="2026-01-27 12:44:49.234460452 +0000 UTC m=+1116.471488662" lastFinishedPulling="2026-01-27 12:44:55.819932763 +0000 UTC m=+1123.056960973" observedRunningTime="2026-01-27 12:44:56.98929691 +0000 UTC m=+1124.226325120" watchObservedRunningTime="2026-01-27 12:44:56.989711072 +0000 UTC m=+1124.226739282" Jan 27 12:44:57 crc kubenswrapper[4900]: I0127 12:44:57.039644 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podStartSLOduration=2.638861486 podStartE2EDuration="9.039576222s" podCreationTimestamp="2026-01-27 12:44:48 +0000 UTC" firstStartedPulling="2026-01-27 12:44:49.453235576 +0000 UTC m=+1116.690263776" lastFinishedPulling="2026-01-27 12:44:55.853950302 +0000 UTC m=+1123.090978512" observedRunningTime="2026-01-27 12:44:57.035112963 +0000 UTC m=+1124.272141173" watchObservedRunningTime="2026-01-27 12:44:57.039576222 +0000 UTC m=+1124.276604432" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.152708 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4"] Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.154454 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.158569 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.158698 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.172536 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4"] Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.180097 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmw88\" (UniqueName: \"kubernetes.io/projected/f207e2d8-e4bc-4953-881f-730cff507dac-kube-api-access-kmw88\") pod \"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.180785 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f207e2d8-e4bc-4953-881f-730cff507dac-secret-volume\") pod \"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.180903 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f207e2d8-e4bc-4953-881f-730cff507dac-config-volume\") pod 
\"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.283857 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f207e2d8-e4bc-4953-881f-730cff507dac-secret-volume\") pod \"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.283938 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f207e2d8-e4bc-4953-881f-730cff507dac-config-volume\") pod \"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.284010 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmw88\" (UniqueName: \"kubernetes.io/projected/f207e2d8-e4bc-4953-881f-730cff507dac-kube-api-access-kmw88\") pod \"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.285577 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f207e2d8-e4bc-4953-881f-730cff507dac-config-volume\") pod \"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.308858 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f207e2d8-e4bc-4953-881f-730cff507dac-secret-volume\") pod \"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.315988 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmw88\" (UniqueName: \"kubernetes.io/projected/f207e2d8-e4bc-4953-881f-730cff507dac-kube-api-access-kmw88\") pod \"collect-profiles-29491965-xptv4\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:00 crc kubenswrapper[4900]: I0127 12:45:00.502617 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:01 crc kubenswrapper[4900]: I0127 12:45:01.039600 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4"] Jan 27 12:45:01 crc kubenswrapper[4900]: I0127 12:45:01.995143 4900 generic.go:334] "Generic (PLEG): container finished" podID="f207e2d8-e4bc-4953-881f-730cff507dac" containerID="acca4490f01236a385baf7895d9a3611d6836e79edebf08b5adef8752fbbc6f7" exitCode=0 Jan 27 12:45:01 crc kubenswrapper[4900]: I0127 12:45:01.995280 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" event={"ID":"f207e2d8-e4bc-4953-881f-730cff507dac","Type":"ContainerDied","Data":"acca4490f01236a385baf7895d9a3611d6836e79edebf08b5adef8752fbbc6f7"} Jan 27 12:45:01 crc kubenswrapper[4900]: I0127 12:45:01.995685 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" event={"ID":"f207e2d8-e4bc-4953-881f-730cff507dac","Type":"ContainerStarted","Data":"e25fd8cdd4066122a5c9b07a2b1a34fbf8d8a3002df26ac2aba3d19a2607bd1e"} Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.403316 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.485791 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f207e2d8-e4bc-4953-881f-730cff507dac-config-volume\") pod \"f207e2d8-e4bc-4953-881f-730cff507dac\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.485922 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmw88\" (UniqueName: \"kubernetes.io/projected/f207e2d8-e4bc-4953-881f-730cff507dac-kube-api-access-kmw88\") pod \"f207e2d8-e4bc-4953-881f-730cff507dac\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.486032 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f207e2d8-e4bc-4953-881f-730cff507dac-secret-volume\") pod \"f207e2d8-e4bc-4953-881f-730cff507dac\" (UID: \"f207e2d8-e4bc-4953-881f-730cff507dac\") " Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.487355 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f207e2d8-e4bc-4953-881f-730cff507dac-config-volume" (OuterVolumeSpecName: "config-volume") pod "f207e2d8-e4bc-4953-881f-730cff507dac" (UID: "f207e2d8-e4bc-4953-881f-730cff507dac"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.494122 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f207e2d8-e4bc-4953-881f-730cff507dac-kube-api-access-kmw88" (OuterVolumeSpecName: "kube-api-access-kmw88") pod "f207e2d8-e4bc-4953-881f-730cff507dac" (UID: "f207e2d8-e4bc-4953-881f-730cff507dac"). InnerVolumeSpecName "kube-api-access-kmw88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.494404 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f207e2d8-e4bc-4953-881f-730cff507dac-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f207e2d8-e4bc-4953-881f-730cff507dac" (UID: "f207e2d8-e4bc-4953-881f-730cff507dac"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.598328 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f207e2d8-e4bc-4953-881f-730cff507dac-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.598368 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmw88\" (UniqueName: \"kubernetes.io/projected/f207e2d8-e4bc-4953-881f-730cff507dac-kube-api-access-kmw88\") on node \"crc\" DevicePath \"\"" Jan 27 12:45:03 crc kubenswrapper[4900]: I0127 12:45:03.598381 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f207e2d8-e4bc-4953-881f-730cff507dac-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 12:45:04 crc kubenswrapper[4900]: I0127 12:45:04.194569 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" event={"ID":"f207e2d8-e4bc-4953-881f-730cff507dac","Type":"ContainerDied","Data":"e25fd8cdd4066122a5c9b07a2b1a34fbf8d8a3002df26ac2aba3d19a2607bd1e"} Jan 27 12:45:04 crc kubenswrapper[4900]: I0127 12:45:04.194642 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e25fd8cdd4066122a5c9b07a2b1a34fbf8d8a3002df26ac2aba3d19a2607bd1e" Jan 27 12:45:04 crc kubenswrapper[4900]: I0127 12:45:04.194716 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4" Jan 27 12:45:08 crc kubenswrapper[4900]: I0127 12:45:08.958806 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 12:45:22 crc kubenswrapper[4900]: I0127 12:45:22.372775 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:45:22 crc kubenswrapper[4900]: I0127 12:45:22.375356 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:45:28 crc kubenswrapper[4900]: I0127 12:45:28.615704 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.701199 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-gnhhx"] Jan 27 12:45:29 crc kubenswrapper[4900]: E0127 12:45:29.701967 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f207e2d8-e4bc-4953-881f-730cff507dac" containerName="collect-profiles" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.701997 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f207e2d8-e4bc-4953-881f-730cff507dac" containerName="collect-profiles" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.702328 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f207e2d8-e4bc-4953-881f-730cff507dac" containerName="collect-profiles" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.706117 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.708177 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4"] Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.709602 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.713925 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.714117 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.715312 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-gcj7b" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.716072 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.731481 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4"] Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.839712 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-8mc5v"] Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.842562 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-8mc5v" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.844856 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-l4zkp" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.845029 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.845386 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.847069 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.855504 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-7dbg4"] Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.857030 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.859389 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.860883 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-conf\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.861152 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wbcw\" (UniqueName: \"kubernetes.io/projected/ec5f276d-77b1-4fa8-b00b-7230c546a47f-kube-api-access-2wbcw\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.861297 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-metrics\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.861546 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8031fe9b-6753-4ab7-abac-fece10fd066b-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9blf4\" (UID: \"8031fe9b-6753-4ab7-abac-fece10fd066b\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.861716 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec5f276d-77b1-4fa8-b00b-7230c546a47f-metrics-certs\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.861842 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r6qg\" (UniqueName: \"kubernetes.io/projected/8031fe9b-6753-4ab7-abac-fece10fd066b-kube-api-access-8r6qg\") pod \"frr-k8s-webhook-server-7df86c4f6c-9blf4\" (UID: \"8031fe9b-6753-4ab7-abac-fece10fd066b\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.861967 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-reloader\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.862094 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-sockets\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.862347 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" 
(UniqueName: \"kubernetes.io/configmap/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-startup\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.873662 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-7dbg4"] Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.964467 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.964570 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4ksj\" (UniqueName: \"kubernetes.io/projected/b047f3e7-1d76-487b-96a3-ff81b159ae95-kube-api-access-v4ksj\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965382 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec5f276d-77b1-4fa8-b00b-7230c546a47f-metrics-certs\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965426 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r6qg\" (UniqueName: \"kubernetes.io/projected/8031fe9b-6753-4ab7-abac-fece10fd066b-kube-api-access-8r6qg\") pod \"frr-k8s-webhook-server-7df86c4f6c-9blf4\" (UID: \"8031fe9b-6753-4ab7-abac-fece10fd066b\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965454 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-reloader\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965484 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-sockets\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965523 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-startup\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965555 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-cert\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965591 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-metrics-certs\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965616 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-metrics-certs\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965633 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7513569c-d113-4de0-8d1c-734db1c14659-metallb-excludel2\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965660 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-conf\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965709 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fds5f\" (UniqueName: \"kubernetes.io/projected/7513569c-d113-4de0-8d1c-734db1c14659-kube-api-access-fds5f\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965739 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wbcw\" (UniqueName: \"kubernetes.io/projected/ec5f276d-77b1-4fa8-b00b-7230c546a47f-kube-api-access-2wbcw\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.965774 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-metrics\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.966265 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-metrics\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.967216 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8031fe9b-6753-4ab7-abac-fece10fd066b-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9blf4\" (UID: \"8031fe9b-6753-4ab7-abac-fece10fd066b\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.967489 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-conf\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " 
pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.968520 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-startup\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.968585 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-frr-sockets\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.969239 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/ec5f276d-77b1-4fa8-b00b-7230c546a47f-reloader\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.973586 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec5f276d-77b1-4fa8-b00b-7230c546a47f-metrics-certs\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.974543 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8031fe9b-6753-4ab7-abac-fece10fd066b-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-9blf4\" (UID: \"8031fe9b-6753-4ab7-abac-fece10fd066b\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.987619 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wbcw\" (UniqueName: \"kubernetes.io/projected/ec5f276d-77b1-4fa8-b00b-7230c546a47f-kube-api-access-2wbcw\") pod \"frr-k8s-gnhhx\" (UID: \"ec5f276d-77b1-4fa8-b00b-7230c546a47f\") " pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:29 crc kubenswrapper[4900]: I0127 12:45:29.988237 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r6qg\" (UniqueName: \"kubernetes.io/projected/8031fe9b-6753-4ab7-abac-fece10fd066b-kube-api-access-8r6qg\") pod \"frr-k8s-webhook-server-7df86c4f6c-9blf4\" (UID: \"8031fe9b-6753-4ab7-abac-fece10fd066b\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.030213 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.054576 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.070088 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-metrics-certs\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.070152 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-metrics-certs\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.070187 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7513569c-d113-4de0-8d1c-734db1c14659-metallb-excludel2\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.070295 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fds5f\" (UniqueName: \"kubernetes.io/projected/7513569c-d113-4de0-8d1c-734db1c14659-kube-api-access-fds5f\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.070425 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.070455 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4ksj\" (UniqueName: \"kubernetes.io/projected/b047f3e7-1d76-487b-96a3-ff81b159ae95-kube-api-access-v4ksj\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.070525 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-cert\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:30 crc kubenswrapper[4900]: E0127 12:45:30.071800 4900 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Jan 27 12:45:30 crc kubenswrapper[4900]: E0127 12:45:30.071931 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-metrics-certs podName:b047f3e7-1d76-487b-96a3-ff81b159ae95 nodeName:}" failed. No retries permitted until 2026-01-27 12:45:30.571893123 +0000 UTC m=+1157.808921333 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-metrics-certs") pod "controller-6968d8fdc4-7dbg4" (UID: "b047f3e7-1d76-487b-96a3-ff81b159ae95") : secret "controller-certs-secret" not found Jan 27 12:45:30 crc kubenswrapper[4900]: E0127 12:45:30.072868 4900 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 27 12:45:30 crc kubenswrapper[4900]: E0127 12:45:30.072914 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist podName:7513569c-d113-4de0-8d1c-734db1c14659 nodeName:}" failed. No retries permitted until 2026-01-27 12:45:30.572900142 +0000 UTC m=+1157.809928352 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist") pod "speaker-8mc5v" (UID: "7513569c-d113-4de0-8d1c-734db1c14659") : secret "metallb-memberlist" not found Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.077930 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7513569c-d113-4de0-8d1c-734db1c14659-metallb-excludel2\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.079949 4900 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.087807 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-metrics-certs\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.088458 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-cert\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.097787 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4ksj\" (UniqueName: \"kubernetes.io/projected/b047f3e7-1d76-487b-96a3-ff81b159ae95-kube-api-access-v4ksj\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.103299 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fds5f\" (UniqueName: \"kubernetes.io/projected/7513569c-d113-4de0-8d1c-734db1c14659-kube-api-access-fds5f\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.612868 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.613446 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-metrics-certs\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:30 crc kubenswrapper[4900]: E0127 12:45:30.613708 4900 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 27 12:45:30 crc kubenswrapper[4900]: E0127 12:45:30.613833 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist podName:7513569c-d113-4de0-8d1c-734db1c14659 nodeName:}" failed. No retries permitted until 2026-01-27 12:45:31.613798547 +0000 UTC m=+1158.850826917 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist") pod "speaker-8mc5v" (UID: "7513569c-d113-4de0-8d1c-734db1c14659") : secret "metallb-memberlist" not found Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.614258 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"4717b0b2f35a46c79f089ca504e6495ebef4ddffc7bd85ff254e4a1a973c8c55"} Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.645202 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b047f3e7-1d76-487b-96a3-ff81b159ae95-metrics-certs\") pod \"controller-6968d8fdc4-7dbg4\" (UID: \"b047f3e7-1d76-487b-96a3-ff81b159ae95\") " pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.789120 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:30 crc kubenswrapper[4900]: I0127 12:45:30.834232 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4"] Jan 27 12:45:31 crc kubenswrapper[4900]: I0127 12:45:31.247404 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-7dbg4"] Jan 27 12:45:31 crc kubenswrapper[4900]: I0127 12:45:31.642429 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-7dbg4" event={"ID":"b047f3e7-1d76-487b-96a3-ff81b159ae95","Type":"ContainerStarted","Data":"7f2f90537a117b95f8c686c78ed09eb78f162f23353a17cd815c73e936e2b074"} Jan 27 12:45:31 crc kubenswrapper[4900]: I0127 12:45:31.653683 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" event={"ID":"8031fe9b-6753-4ab7-abac-fece10fd066b","Type":"ContainerStarted","Data":"0de174471760782f9ecf83df9378d4d55ecfbbe2b692eccdfe07b88068b3af63"} Jan 27 12:45:31 crc kubenswrapper[4900]: I0127 12:45:31.668869 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:31 crc kubenswrapper[4900]: E0127 12:45:31.669090 4900 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 27 12:45:31 crc kubenswrapper[4900]: E0127 12:45:31.669177 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist podName:7513569c-d113-4de0-8d1c-734db1c14659 nodeName:}" failed. No retries permitted until 2026-01-27 12:45:33.669151896 +0000 UTC m=+1160.906180106 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist") pod "speaker-8mc5v" (UID: "7513569c-d113-4de0-8d1c-734db1c14659") : secret "metallb-memberlist" not found Jan 27 12:45:32 crc kubenswrapper[4900]: I0127 12:45:32.733272 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-7dbg4" event={"ID":"b047f3e7-1d76-487b-96a3-ff81b159ae95","Type":"ContainerStarted","Data":"a0757622f5dab501e822fb751a5b4e1c43fe17dd0c04c507aace79b5636f30a2"} Jan 27 12:45:32 crc kubenswrapper[4900]: I0127 12:45:32.733793 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-7dbg4" event={"ID":"b047f3e7-1d76-487b-96a3-ff81b159ae95","Type":"ContainerStarted","Data":"c6bdacae7daceca68fcd2fb738c24a002689fdc51827aae58170a4511dc90e03"} Jan 27 12:45:32 crc kubenswrapper[4900]: I0127 12:45:32.735392 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:32 crc kubenswrapper[4900]: I0127 12:45:32.787957 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-7dbg4" podStartSLOduration=3.787923312 podStartE2EDuration="3.787923312s" podCreationTimestamp="2026-01-27 12:45:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:45:32.780071743 +0000 UTC m=+1160.017099943" watchObservedRunningTime="2026-01-27 12:45:32.787923312 +0000 UTC m=+1160.024951522" Jan 27 12:45:33 crc kubenswrapper[4900]: I0127 12:45:33.719600 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:33 crc kubenswrapper[4900]: I0127 12:45:33.732638 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7513569c-d113-4de0-8d1c-734db1c14659-memberlist\") pod \"speaker-8mc5v\" (UID: \"7513569c-d113-4de0-8d1c-734db1c14659\") " pod="metallb-system/speaker-8mc5v" Jan 27 12:45:33 crc kubenswrapper[4900]: I0127 12:45:33.768415 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-8mc5v" Jan 27 12:45:34 crc kubenswrapper[4900]: I0127 12:45:34.782371 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-8mc5v" event={"ID":"7513569c-d113-4de0-8d1c-734db1c14659","Type":"ContainerStarted","Data":"7b51a4b3db8625ed76bfdd550c02d1163f54313b912e0964e03dcb70decf30a4"} Jan 27 12:45:34 crc kubenswrapper[4900]: I0127 12:45:34.782740 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-8mc5v" event={"ID":"7513569c-d113-4de0-8d1c-734db1c14659","Type":"ContainerStarted","Data":"fb9c2c3038b700c46aa56a66aaabfe390e5bae0b6bba2f6097e740ccc01e07f5"} Jan 27 12:45:34 crc kubenswrapper[4900]: I0127 12:45:34.782753 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-8mc5v" event={"ID":"7513569c-d113-4de0-8d1c-734db1c14659","Type":"ContainerStarted","Data":"2225cd293709b8e1b301b137863cb77443f5244ef74bdf721e097ba2a5767f86"} Jan 27 12:45:34 crc kubenswrapper[4900]: I0127 12:45:34.783680 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-8mc5v" Jan 27 12:45:36 crc kubenswrapper[4900]: I0127 12:45:36.571446 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-8mc5v" podStartSLOduration=7.571396883 podStartE2EDuration="7.571396883s" podCreationTimestamp="2026-01-27 12:45:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:45:34.815518714 +0000 UTC m=+1162.052546924" watchObservedRunningTime="2026-01-27 12:45:36.571396883 +0000 UTC m=+1163.808425093" Jan 27 12:45:40 crc kubenswrapper[4900]: I0127 12:45:40.852301 4900 generic.go:334] "Generic (PLEG): container finished" podID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerID="2ec20b15022587d51f7378af195cff1dfb315ee564e3f3e5b80118f45a8c9f8e" exitCode=0 Jan 27 12:45:40 crc kubenswrapper[4900]: I0127 12:45:40.852465 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerDied","Data":"2ec20b15022587d51f7378af195cff1dfb315ee564e3f3e5b80118f45a8c9f8e"} Jan 27 12:45:40 crc kubenswrapper[4900]: I0127 12:45:40.856505 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" event={"ID":"8031fe9b-6753-4ab7-abac-fece10fd066b","Type":"ContainerStarted","Data":"3ad00e407836ccd2af15463c034144967f0a9d4ad31f058f57d1fbd26f5b6db0"} Jan 27 12:45:40 crc kubenswrapper[4900]: I0127 12:45:40.856681 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:40 crc kubenswrapper[4900]: I0127 12:45:40.918535 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" podStartSLOduration=2.777538369 podStartE2EDuration="11.91850258s" podCreationTimestamp="2026-01-27 12:45:29 +0000 UTC" firstStartedPulling="2026-01-27 12:45:30.853490939 +0000 UTC m=+1158.090519149" lastFinishedPulling="2026-01-27 12:45:39.99445515 +0000 UTC m=+1167.231483360" observedRunningTime="2026-01-27 12:45:40.912113765 +0000 UTC m=+1168.149141975" watchObservedRunningTime="2026-01-27 12:45:40.91850258 +0000 UTC m=+1168.155530790" Jan 27 12:45:41 crc kubenswrapper[4900]: I0127 12:45:41.870611 4900 generic.go:334] "Generic (PLEG): container finished" 
podID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerID="70e1e202b43187cfa5bc7f9864aabad1bc66620800aa8590c240c40805fdde05" exitCode=0 Jan 27 12:45:41 crc kubenswrapper[4900]: I0127 12:45:41.870751 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerDied","Data":"70e1e202b43187cfa5bc7f9864aabad1bc66620800aa8590c240c40805fdde05"} Jan 27 12:45:43 crc kubenswrapper[4900]: I0127 12:45:43.068533 4900 generic.go:334] "Generic (PLEG): container finished" podID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerID="622b8d856b21b7db40a28e0f001718d05a38e758ee58d4720429bf50f68305e0" exitCode=0 Jan 27 12:45:43 crc kubenswrapper[4900]: I0127 12:45:43.068612 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerDied","Data":"622b8d856b21b7db40a28e0f001718d05a38e758ee58d4720429bf50f68305e0"} Jan 27 12:45:44 crc kubenswrapper[4900]: I0127 12:45:44.090927 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"99344f37376435666fa7be2620a6a4f5d4adb4264df0cec98ffa0ca1feb36171"} Jan 27 12:45:44 crc kubenswrapper[4900]: I0127 12:45:44.091408 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"1fe2024e0a8622ac34ff6a14a0930a30efa9fc32f2b5e8f61fc308b8b1f6771d"} Jan 27 12:45:44 crc kubenswrapper[4900]: I0127 12:45:44.091420 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"58cc2bb3c4ced73464022004c5bc29b75033657c8b232b10549b81b5ebe2e2fb"} Jan 27 12:45:45 crc kubenswrapper[4900]: I0127 12:45:45.125250 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"2121ada31146f3ca5fe9873ad60ce9b97d1137423dfae900d3f0229959ad196b"} Jan 27 12:45:45 crc kubenswrapper[4900]: I0127 12:45:45.125719 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"308f032d8372d4f664f02045d781c9949b14c3452b4d244b0dfc471c61580ea4"} Jan 27 12:45:45 crc kubenswrapper[4900]: I0127 12:45:45.125731 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"2db86daf72d0eb88753676f157cb61f58cc6e5e21009d24a1c508c536b0c5dea"} Jan 27 12:45:45 crc kubenswrapper[4900]: I0127 12:45:45.125748 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:50 crc kubenswrapper[4900]: I0127 12:45:50.033005 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:50 crc kubenswrapper[4900]: I0127 12:45:50.150449 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 12:45:50 crc kubenswrapper[4900]: I0127 12:45:50.184828 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-gnhhx" 
podStartSLOduration=11.694188755999999 podStartE2EDuration="21.184798587s" podCreationTimestamp="2026-01-27 12:45:29 +0000 UTC" firstStartedPulling="2026-01-27 12:45:30.525483158 +0000 UTC m=+1157.762511368" lastFinishedPulling="2026-01-27 12:45:40.016092989 +0000 UTC m=+1167.253121199" observedRunningTime="2026-01-27 12:45:45.163928519 +0000 UTC m=+1172.400956739" watchObservedRunningTime="2026-01-27 12:45:50.184798587 +0000 UTC m=+1177.421826797" Jan 27 12:45:50 crc kubenswrapper[4900]: I0127 12:45:50.195417 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:45:50 crc kubenswrapper[4900]: I0127 12:45:50.799856 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-7dbg4" Jan 27 12:45:52 crc kubenswrapper[4900]: I0127 12:45:52.373266 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:45:52 crc kubenswrapper[4900]: I0127 12:45:52.373380 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:45:53 crc kubenswrapper[4900]: I0127 12:45:53.772357 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-8mc5v" Jan 27 12:45:58 crc kubenswrapper[4900]: I0127 12:45:58.445406 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-rbvm7"] Jan 27 12:45:58 crc kubenswrapper[4900]: I0127 12:45:58.449597 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-rbvm7" Jan 27 12:45:58 crc kubenswrapper[4900]: I0127 12:45:58.458969 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-s2dwm" Jan 27 12:45:58 crc kubenswrapper[4900]: I0127 12:45:58.459399 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 27 12:45:58 crc kubenswrapper[4900]: I0127 12:45:58.463778 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 27 12:45:58 crc kubenswrapper[4900]: I0127 12:45:58.512897 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-rbvm7"] Jan 27 12:45:58 crc kubenswrapper[4900]: I0127 12:45:58.723050 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m96fv\" (UniqueName: \"kubernetes.io/projected/073941d4-4d0a-4181-9137-89baa4091ad9-kube-api-access-m96fv\") pod \"openstack-operator-index-rbvm7\" (UID: \"073941d4-4d0a-4181-9137-89baa4091ad9\") " pod="openstack-operators/openstack-operator-index-rbvm7" Jan 27 12:45:58 crc kubenswrapper[4900]: I0127 12:45:58.975581 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m96fv\" (UniqueName: \"kubernetes.io/projected/073941d4-4d0a-4181-9137-89baa4091ad9-kube-api-access-m96fv\") pod \"openstack-operator-index-rbvm7\" (UID: \"073941d4-4d0a-4181-9137-89baa4091ad9\") " pod="openstack-operators/openstack-operator-index-rbvm7" Jan 27 12:45:59 crc kubenswrapper[4900]: I0127 12:45:59.021699 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m96fv\" (UniqueName: \"kubernetes.io/projected/073941d4-4d0a-4181-9137-89baa4091ad9-kube-api-access-m96fv\") pod \"openstack-operator-index-rbvm7\" (UID: \"073941d4-4d0a-4181-9137-89baa4091ad9\") " pod="openstack-operators/openstack-operator-index-rbvm7" Jan 27 12:45:59 crc kubenswrapper[4900]: I0127 12:45:59.096555 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-rbvm7" Jan 27 12:45:59 crc kubenswrapper[4900]: I0127 12:45:59.673752 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-rbvm7"] Jan 27 12:45:59 crc kubenswrapper[4900]: W0127 12:45:59.685618 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod073941d4_4d0a_4181_9137_89baa4091ad9.slice/crio-47f2b27bba86d7d9ea39d7c57bec48fbc2cd4d91358a2c2039c13a726b9a9d4b WatchSource:0}: Error finding container 47f2b27bba86d7d9ea39d7c57bec48fbc2cd4d91358a2c2039c13a726b9a9d4b: Status 404 returned error can't find the container with id 47f2b27bba86d7d9ea39d7c57bec48fbc2cd4d91358a2c2039c13a726b9a9d4b Jan 27 12:46:00 crc kubenswrapper[4900]: I0127 12:46:00.000794 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rbvm7" event={"ID":"073941d4-4d0a-4181-9137-89baa4091ad9","Type":"ContainerStarted","Data":"47f2b27bba86d7d9ea39d7c57bec48fbc2cd4d91358a2c2039c13a726b9a9d4b"} Jan 27 12:46:00 crc kubenswrapper[4900]: I0127 12:46:00.034612 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-gnhhx" Jan 27 12:46:01 crc kubenswrapper[4900]: I0127 12:46:01.102458 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-rbvm7"] Jan 27 12:46:01 crc kubenswrapper[4900]: I0127 12:46:01.714538 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-xvszg"] Jan 27 12:46:01 crc kubenswrapper[4900]: I0127 12:46:01.716453 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:01 crc kubenswrapper[4900]: I0127 12:46:01.738694 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-xvszg"] Jan 27 12:46:01 crc kubenswrapper[4900]: I0127 12:46:01.815288 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbvsm\" (UniqueName: \"kubernetes.io/projected/147d12f6-3180-41d8-92c9-55aab763d313-kube-api-access-zbvsm\") pod \"openstack-operator-index-xvszg\" (UID: \"147d12f6-3180-41d8-92c9-55aab763d313\") " pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:01 crc kubenswrapper[4900]: I0127 12:46:01.918450 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbvsm\" (UniqueName: \"kubernetes.io/projected/147d12f6-3180-41d8-92c9-55aab763d313-kube-api-access-zbvsm\") pod \"openstack-operator-index-xvszg\" (UID: \"147d12f6-3180-41d8-92c9-55aab763d313\") " pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:01 crc kubenswrapper[4900]: I0127 12:46:01.947462 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbvsm\" (UniqueName: \"kubernetes.io/projected/147d12f6-3180-41d8-92c9-55aab763d313-kube-api-access-zbvsm\") pod \"openstack-operator-index-xvszg\" (UID: \"147d12f6-3180-41d8-92c9-55aab763d313\") " pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:02 crc kubenswrapper[4900]: I0127 12:46:02.054851 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:04 crc kubenswrapper[4900]: I0127 12:46:04.069689 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rbvm7" event={"ID":"073941d4-4d0a-4181-9137-89baa4091ad9","Type":"ContainerStarted","Data":"8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878"} Jan 27 12:46:04 crc kubenswrapper[4900]: I0127 12:46:04.069944 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-rbvm7" podUID="073941d4-4d0a-4181-9137-89baa4091ad9" containerName="registry-server" containerID="cri-o://8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878" gracePeriod=2 Jan 27 12:46:04 crc kubenswrapper[4900]: I0127 12:46:04.088202 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-xvszg"] Jan 27 12:46:04 crc kubenswrapper[4900]: I0127 12:46:04.121432 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-rbvm7" podStartSLOduration=3.146592375 podStartE2EDuration="7.12132826s" podCreationTimestamp="2026-01-27 12:45:57 +0000 UTC" firstStartedPulling="2026-01-27 12:45:59.68922075 +0000 UTC m=+1186.926248960" lastFinishedPulling="2026-01-27 12:46:03.663956635 +0000 UTC m=+1190.900984845" observedRunningTime="2026-01-27 12:46:04.099086003 +0000 UTC m=+1191.336114213" watchObservedRunningTime="2026-01-27 12:46:04.12132826 +0000 UTC m=+1191.358356470" Jan 27 12:46:04 crc kubenswrapper[4900]: I0127 12:46:04.509665 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-rbvm7" Jan 27 12:46:04 crc kubenswrapper[4900]: I0127 12:46:04.688749 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m96fv\" (UniqueName: \"kubernetes.io/projected/073941d4-4d0a-4181-9137-89baa4091ad9-kube-api-access-m96fv\") pod \"073941d4-4d0a-4181-9137-89baa4091ad9\" (UID: \"073941d4-4d0a-4181-9137-89baa4091ad9\") " Jan 27 12:46:04 crc kubenswrapper[4900]: I0127 12:46:04.700136 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/073941d4-4d0a-4181-9137-89baa4091ad9-kube-api-access-m96fv" (OuterVolumeSpecName: "kube-api-access-m96fv") pod "073941d4-4d0a-4181-9137-89baa4091ad9" (UID: "073941d4-4d0a-4181-9137-89baa4091ad9"). InnerVolumeSpecName "kube-api-access-m96fv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:46:04 crc kubenswrapper[4900]: I0127 12:46:04.793802 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m96fv\" (UniqueName: \"kubernetes.io/projected/073941d4-4d0a-4181-9137-89baa4091ad9-kube-api-access-m96fv\") on node \"crc\" DevicePath \"\"" Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.082116 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xvszg" event={"ID":"147d12f6-3180-41d8-92c9-55aab763d313","Type":"ContainerStarted","Data":"3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048"} Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.082636 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xvszg" event={"ID":"147d12f6-3180-41d8-92c9-55aab763d313","Type":"ContainerStarted","Data":"37aec8e7d605d9569839e6581aa29ce4098ec2d9dfa460e2f91ceaff21cdccac"} Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.083812 4900 generic.go:334] "Generic (PLEG): container finished" podID="073941d4-4d0a-4181-9137-89baa4091ad9" containerID="8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878" exitCode=0 Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.083867 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rbvm7" event={"ID":"073941d4-4d0a-4181-9137-89baa4091ad9","Type":"ContainerDied","Data":"8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878"} Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.083906 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-rbvm7" event={"ID":"073941d4-4d0a-4181-9137-89baa4091ad9","Type":"ContainerDied","Data":"47f2b27bba86d7d9ea39d7c57bec48fbc2cd4d91358a2c2039c13a726b9a9d4b"} Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.083994 4900 scope.go:117] "RemoveContainer" containerID="8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878" Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.084284 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-rbvm7" Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.106606 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-xvszg" podStartSLOduration=3.950143421 podStartE2EDuration="4.106573511s" podCreationTimestamp="2026-01-27 12:46:01 +0000 UTC" firstStartedPulling="2026-01-27 12:46:04.110895857 +0000 UTC m=+1191.347924067" lastFinishedPulling="2026-01-27 12:46:04.267325957 +0000 UTC m=+1191.504354157" observedRunningTime="2026-01-27 12:46:05.103298636 +0000 UTC m=+1192.340326846" watchObservedRunningTime="2026-01-27 12:46:05.106573511 +0000 UTC m=+1192.343601721" Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.115406 4900 scope.go:117] "RemoveContainer" containerID="8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878" Jan 27 12:46:05 crc kubenswrapper[4900]: E0127 12:46:05.116301 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878\": container with ID starting with 8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878 not found: ID does not exist" containerID="8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878" Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.116409 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878"} err="failed to get container status \"8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878\": rpc error: code = NotFound desc = could not find container \"8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878\": container with ID starting with 8531da9c778c48b16481c59f2feaac614e7f8c53d483acb627621a359c822878 not found: ID does not exist" Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.136807 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-rbvm7"] Jan 27 12:46:05 crc kubenswrapper[4900]: I0127 12:46:05.147495 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-rbvm7"] Jan 27 12:46:06 crc kubenswrapper[4900]: I0127 12:46:06.494681 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="073941d4-4d0a-4181-9137-89baa4091ad9" path="/var/lib/kubelet/pods/073941d4-4d0a-4181-9137-89baa4091ad9/volumes" Jan 27 12:46:12 crc kubenswrapper[4900]: I0127 12:46:12.275212 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:12 crc kubenswrapper[4900]: I0127 12:46:12.283534 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:12 crc kubenswrapper[4900]: I0127 12:46:12.321525 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:13 crc kubenswrapper[4900]: I0127 12:46:13.363603 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 12:46:20 crc kubenswrapper[4900]: I0127 12:46:20.894935 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"] Jan 27 12:46:20 
Jan 27 12:46:20 crc kubenswrapper[4900]: E0127 12:46:20.896345 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="073941d4-4d0a-4181-9137-89baa4091ad9" containerName="registry-server"
Jan 27 12:46:20 crc kubenswrapper[4900]: I0127 12:46:20.896382 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="073941d4-4d0a-4181-9137-89baa4091ad9" containerName="registry-server"
Jan 27 12:46:20 crc kubenswrapper[4900]: I0127 12:46:20.896631 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="073941d4-4d0a-4181-9137-89baa4091ad9" containerName="registry-server"
Jan 27 12:46:20 crc kubenswrapper[4900]: I0127 12:46:20.898326 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:20 crc kubenswrapper[4900]: I0127 12:46:20.905961 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-ttdn9"
Jan 27 12:46:20 crc kubenswrapper[4900]: I0127 12:46:20.915934 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"]
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.026464 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcr96\" (UniqueName: \"kubernetes.io/projected/0cccb4df-e43d-41b1-9c22-98a6d24536ac-kube-api-access-bcr96\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.026561 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-util\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.027394 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-bundle\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.129877 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcr96\" (UniqueName: \"kubernetes.io/projected/0cccb4df-e43d-41b1-9c22-98a6d24536ac-kube-api-access-bcr96\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.129989 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-util\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.130324 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-bundle\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.130930 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-util\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.130965 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-bundle\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.154562 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcr96\" (UniqueName: \"kubernetes.io/projected/0cccb4df-e43d-41b1-9c22-98a6d24536ac-kube-api-access-bcr96\") pod \"ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.227778 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"
Jan 27 12:46:21 crc kubenswrapper[4900]: I0127 12:46:21.683429 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw"]
Jan 27 12:46:21 crc kubenswrapper[4900]: W0127 12:46:21.688373 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0cccb4df_e43d_41b1_9c22_98a6d24536ac.slice/crio-de89bfb59ded35d1920a03d6e71064fe5d262821aad6f0b64a18fe002176fc53 WatchSource:0}: Error finding container de89bfb59ded35d1920a03d6e71064fe5d262821aad6f0b64a18fe002176fc53: Status 404 returned error can't find the container with id de89bfb59ded35d1920a03d6e71064fe5d262821aad6f0b64a18fe002176fc53
Jan 27 12:46:22 crc kubenswrapper[4900]: I0127 12:46:22.373466 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:46:22 crc kubenswrapper[4900]: I0127 12:46:22.373968 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:46:22 crc kubenswrapper[4900]: I0127 12:46:22.374341 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x"
Jan 27 12:46:22 crc kubenswrapper[4900]: I0127 12:46:22.375648 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"311b2446296261b95a4a8935d2688a7bfd4e27781be2ac18543d5aed5bad7b0a"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 12:46:22 crc kubenswrapper[4900]: I0127 12:46:22.375758 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://311b2446296261b95a4a8935d2688a7bfd4e27781be2ac18543d5aed5bad7b0a" gracePeriod=600
Jan 27 12:46:22 crc kubenswrapper[4900]: I0127 12:46:22.422615 4900 generic.go:334] "Generic (PLEG): container finished" podID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerID="ae1b95bb1851e52aa40d24eb70a5266e3e3654a32f18962c51c53b5bc777de14" exitCode=0
Jan 27 12:46:22 crc kubenswrapper[4900]: I0127 12:46:22.422684 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw" event={"ID":"0cccb4df-e43d-41b1-9c22-98a6d24536ac","Type":"ContainerDied","Data":"ae1b95bb1851e52aa40d24eb70a5266e3e3654a32f18962c51c53b5bc777de14"}
Jan 27 12:46:22 crc kubenswrapper[4900]: I0127 12:46:22.422731 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw" event={"ID":"0cccb4df-e43d-41b1-9c22-98a6d24536ac","Type":"ContainerStarted","Data":"de89bfb59ded35d1920a03d6e71064fe5d262821aad6f0b64a18fe002176fc53"}
Jan 27 12:46:23 crc kubenswrapper[4900]: I0127 12:46:23.439932 4900 generic.go:334] "Generic (PLEG): container finished" podID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerID="23d064ac5956dd1a92065f230dc4e1ef483ca946dca5e0b6b39c8fe41b47329a" exitCode=0
Jan 27 12:46:23 crc kubenswrapper[4900]: I0127 12:46:23.440039 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw" event={"ID":"0cccb4df-e43d-41b1-9c22-98a6d24536ac","Type":"ContainerDied","Data":"23d064ac5956dd1a92065f230dc4e1ef483ca946dca5e0b6b39c8fe41b47329a"}
Jan 27 12:46:23 crc kubenswrapper[4900]: I0127 12:46:23.448108 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="311b2446296261b95a4a8935d2688a7bfd4e27781be2ac18543d5aed5bad7b0a" exitCode=0
Jan 27 12:46:23 crc kubenswrapper[4900]: I0127 12:46:23.448175 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"311b2446296261b95a4a8935d2688a7bfd4e27781be2ac18543d5aed5bad7b0a"}
Jan 27 12:46:23 crc kubenswrapper[4900]: I0127 12:46:23.448215 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"d3eb521560952eab8f11162cda8d03a25740b3b833254e8284177a101ff26343"}
Jan 27 12:46:23 crc kubenswrapper[4900]: I0127 12:46:23.448275 4900 scope.go:117] "RemoveContainer" containerID="3985ee48c72db877d5168b811e894771c936712a44e12eed08928d84a04baff9"
Jan 27 12:46:24 crc kubenswrapper[4900]: I0127 12:46:24.462455 4900 generic.go:334] "Generic (PLEG): container finished" podID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerID="3b7c5fd980e85b3e427b4e0fc33417a575ca21d290a3d1cbe6c2d78907fb79e1" exitCode=0
Jan 27 12:46:24 crc kubenswrapper[4900]: I0127 12:46:24.462567 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw" event={"ID":"0cccb4df-e43d-41b1-9c22-98a6d24536ac","Type":"ContainerDied","Data":"3b7c5fd980e85b3e427b4e0fc33417a575ca21d290a3d1cbe6c2d78907fb79e1"}
Need to start a new one" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw" Jan 27 12:46:25 crc kubenswrapper[4900]: I0127 12:46:25.935841 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-util\") pod \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " Jan 27 12:46:25 crc kubenswrapper[4900]: I0127 12:46:25.935968 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-bundle\") pod \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " Jan 27 12:46:25 crc kubenswrapper[4900]: I0127 12:46:25.936160 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcr96\" (UniqueName: \"kubernetes.io/projected/0cccb4df-e43d-41b1-9c22-98a6d24536ac-kube-api-access-bcr96\") pod \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\" (UID: \"0cccb4df-e43d-41b1-9c22-98a6d24536ac\") " Jan 27 12:46:25 crc kubenswrapper[4900]: I0127 12:46:25.937216 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-bundle" (OuterVolumeSpecName: "bundle") pod "0cccb4df-e43d-41b1-9c22-98a6d24536ac" (UID: "0cccb4df-e43d-41b1-9c22-98a6d24536ac"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:46:25 crc kubenswrapper[4900]: I0127 12:46:25.945766 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cccb4df-e43d-41b1-9c22-98a6d24536ac-kube-api-access-bcr96" (OuterVolumeSpecName: "kube-api-access-bcr96") pod "0cccb4df-e43d-41b1-9c22-98a6d24536ac" (UID: "0cccb4df-e43d-41b1-9c22-98a6d24536ac"). InnerVolumeSpecName "kube-api-access-bcr96". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:46:25 crc kubenswrapper[4900]: I0127 12:46:25.951029 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-util" (OuterVolumeSpecName: "util") pod "0cccb4df-e43d-41b1-9c22-98a6d24536ac" (UID: "0cccb4df-e43d-41b1-9c22-98a6d24536ac"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:46:26 crc kubenswrapper[4900]: I0127 12:46:26.038778 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcr96\" (UniqueName: \"kubernetes.io/projected/0cccb4df-e43d-41b1-9c22-98a6d24536ac-kube-api-access-bcr96\") on node \"crc\" DevicePath \"\"" Jan 27 12:46:26 crc kubenswrapper[4900]: I0127 12:46:26.038839 4900 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-util\") on node \"crc\" DevicePath \"\"" Jan 27 12:46:26 crc kubenswrapper[4900]: I0127 12:46:26.038909 4900 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cccb4df-e43d-41b1-9c22-98a6d24536ac-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:46:26 crc kubenswrapper[4900]: I0127 12:46:26.496081 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw" event={"ID":"0cccb4df-e43d-41b1-9c22-98a6d24536ac","Type":"ContainerDied","Data":"de89bfb59ded35d1920a03d6e71064fe5d262821aad6f0b64a18fe002176fc53"} Jan 27 12:46:26 crc kubenswrapper[4900]: I0127 12:46:26.496477 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de89bfb59ded35d1920a03d6e71064fe5d262821aad6f0b64a18fe002176fc53" Jan 27 12:46:26 crc kubenswrapper[4900]: I0127 12:46:26.496609 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.206463 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll"] Jan 27 12:46:42 crc kubenswrapper[4900]: E0127 12:46:42.208948 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerName="pull" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.208991 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerName="pull" Jan 27 12:46:42 crc kubenswrapper[4900]: E0127 12:46:42.209041 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerName="extract" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.209050 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerName="extract" Jan 27 12:46:42 crc kubenswrapper[4900]: E0127 12:46:42.209086 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerName="util" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.209095 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerName="util" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.209401 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cccb4df-e43d-41b1-9c22-98a6d24536ac" containerName="extract" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.210837 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.214698 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-k8mrv" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.233075 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll"] Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.369474 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5fhq\" (UniqueName: \"kubernetes.io/projected/f3cc1727-3d00-43f4-92c3-5ef428297727-kube-api-access-l5fhq\") pod \"openstack-operator-controller-init-d8fd5ccf5-5h9ll\" (UID: \"f3cc1727-3d00-43f4-92c3-5ef428297727\") " pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.471222 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5fhq\" (UniqueName: \"kubernetes.io/projected/f3cc1727-3d00-43f4-92c3-5ef428297727-kube-api-access-l5fhq\") pod \"openstack-operator-controller-init-d8fd5ccf5-5h9ll\" (UID: \"f3cc1727-3d00-43f4-92c3-5ef428297727\") " pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.497187 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5fhq\" (UniqueName: \"kubernetes.io/projected/f3cc1727-3d00-43f4-92c3-5ef428297727-kube-api-access-l5fhq\") pod \"openstack-operator-controller-init-d8fd5ccf5-5h9ll\" (UID: \"f3cc1727-3d00-43f4-92c3-5ef428297727\") " pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 12:46:42 crc kubenswrapper[4900]: I0127 12:46:42.535401 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 12:46:43 crc kubenswrapper[4900]: I0127 12:46:43.065236 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll"] Jan 27 12:46:43 crc kubenswrapper[4900]: I0127 12:46:43.744949 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" event={"ID":"f3cc1727-3d00-43f4-92c3-5ef428297727","Type":"ContainerStarted","Data":"cc5754835720e2ee5f23447695c52ba225781074e76557b515b50543eeb0282a"} Jan 27 12:46:48 crc kubenswrapper[4900]: I0127 12:46:48.791344 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" event={"ID":"f3cc1727-3d00-43f4-92c3-5ef428297727","Type":"ContainerStarted","Data":"df5d84d1f84aff642214567b2f892011fbfe8522a4b4439f82a43e5297f2420b"} Jan 27 12:46:48 crc kubenswrapper[4900]: I0127 12:46:48.792111 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 12:46:48 crc kubenswrapper[4900]: I0127 12:46:48.831207 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" podStartSLOduration=1.473588752 podStartE2EDuration="6.831136165s" podCreationTimestamp="2026-01-27 12:46:42 +0000 UTC" firstStartedPulling="2026-01-27 12:46:43.087490041 +0000 UTC m=+1230.324518251" lastFinishedPulling="2026-01-27 12:46:48.445037454 +0000 UTC m=+1235.682065664" observedRunningTime="2026-01-27 12:46:48.82203013 +0000 UTC m=+1236.059058350" watchObservedRunningTime="2026-01-27 12:46:48.831136165 +0000 UTC m=+1236.068164375" Jan 27 12:47:02 crc kubenswrapper[4900]: I0127 12:47:02.539095 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.804487 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.806419 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.810573 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-wpq8t" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.814610 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.816140 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.818746 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-6fj6f" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.828719 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.843186 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.845287 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.850465 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.852765 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-cvggw" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.891173 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.916848 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.918824 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.928907 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.930451 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.936789 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-hc9cx" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.944328 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-vfv6z" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.969643 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln"] Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.982298 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvsvq\" (UniqueName: \"kubernetes.io/projected/b094071d-c368-40e6-8515-a17d0a22a868-kube-api-access-zvsvq\") pod \"designate-operator-controller-manager-77554cdc5c-jxgdh\" (UID: \"b094071d-c368-40e6-8515-a17d0a22a868\") " pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.982442 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmk66\" (UniqueName: \"kubernetes.io/projected/1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc-kube-api-access-qmk66\") pod \"cinder-operator-controller-manager-655bf9cfbb-7wh5z\" (UID: \"1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc\") " pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.982534 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52sn6\" (UniqueName: \"kubernetes.io/projected/e3cba13f-5396-4c71-8f81-d2d932baca1f-kube-api-access-52sn6\") pod \"barbican-operator-controller-manager-65ff799cfd-z72lx\" (UID: \"e3cba13f-5396-4c71-8f81-d2d932baca1f\") " pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" Jan 27 12:47:19 crc kubenswrapper[4900]: I0127 12:47:19.987159 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.008435 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.013766 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.021083 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-sc7kf" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.023349 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.071246 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.072999 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.077531 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-s5tb4" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.077804 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.086332 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.087446 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb9qx\" (UniqueName: \"kubernetes.io/projected/5155088c-b873-4fac-b1e9-87f57c2fae68-kube-api-access-jb9qx\") pod \"heat-operator-controller-manager-575ffb885b-wvhzp\" (UID: \"5155088c-b873-4fac-b1e9-87f57c2fae68\") " pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.087529 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tt827\" (UniqueName: \"kubernetes.io/projected/e8b4a268-6430-4f23-bd93-aa62b52710a6-kube-api-access-tt827\") pod \"glance-operator-controller-manager-67dd55ff59-dl6ln\" (UID: \"e8b4a268-6430-4f23-bd93-aa62b52710a6\") " pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.087623 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmk66\" (UniqueName: \"kubernetes.io/projected/1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc-kube-api-access-qmk66\") pod \"cinder-operator-controller-manager-655bf9cfbb-7wh5z\" (UID: \"1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc\") " pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.087778 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.088039 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52sn6\" (UniqueName: \"kubernetes.io/projected/e3cba13f-5396-4c71-8f81-d2d932baca1f-kube-api-access-52sn6\") pod \"barbican-operator-controller-manager-65ff799cfd-z72lx\" (UID: \"e3cba13f-5396-4c71-8f81-d2d932baca1f\") " pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.088298 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvsvq\" (UniqueName: \"kubernetes.io/projected/b094071d-c368-40e6-8515-a17d0a22a868-kube-api-access-zvsvq\") pod \"designate-operator-controller-manager-77554cdc5c-jxgdh\" (UID: \"b094071d-c368-40e6-8515-a17d0a22a868\") " pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.091943 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-2lq7x" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.144710 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvsvq\" (UniqueName: \"kubernetes.io/projected/b094071d-c368-40e6-8515-a17d0a22a868-kube-api-access-zvsvq\") pod \"designate-operator-controller-manager-77554cdc5c-jxgdh\" (UID: \"b094071d-c368-40e6-8515-a17d0a22a868\") " pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.147541 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52sn6\" (UniqueName: \"kubernetes.io/projected/e3cba13f-5396-4c71-8f81-d2d932baca1f-kube-api-access-52sn6\") pod \"barbican-operator-controller-manager-65ff799cfd-z72lx\" (UID: \"e3cba13f-5396-4c71-8f81-d2d932baca1f\") " pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.155913 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.156270 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmk66\" (UniqueName: \"kubernetes.io/projected/1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc-kube-api-access-qmk66\") pod \"cinder-operator-controller-manager-655bf9cfbb-7wh5z\" (UID: \"1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc\") " pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.162497 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.171372 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.190716 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxpmm\" (UniqueName: \"kubernetes.io/projected/175105c2-dfc2-4752-bf75-a027d86dc373-kube-api-access-wxpmm\") pod \"horizon-operator-controller-manager-77d5c5b54f-82q5c\" (UID: \"175105c2-dfc2-4752-bf75-a027d86dc373\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.190826 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxhbb\" (UniqueName: \"kubernetes.io/projected/76f1d09b-01aa-4c81-b568-8ffb58182475-kube-api-access-rxhbb\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.190931 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.191003 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stmc5\" (UniqueName: \"kubernetes.io/projected/4715fe70-acab-4dea-adde-68e1a6e8cb28-kube-api-access-stmc5\") pod \"ironic-operator-controller-manager-768b776ffb-2cndf\" (UID: \"4715fe70-acab-4dea-adde-68e1a6e8cb28\") " pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.191078 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb9qx\" (UniqueName: \"kubernetes.io/projected/5155088c-b873-4fac-b1e9-87f57c2fae68-kube-api-access-jb9qx\") pod \"heat-operator-controller-manager-575ffb885b-wvhzp\" (UID: \"5155088c-b873-4fac-b1e9-87f57c2fae68\") " pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.191139 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tt827\" (UniqueName: \"kubernetes.io/projected/e8b4a268-6430-4f23-bd93-aa62b52710a6-kube-api-access-tt827\") pod \"glance-operator-controller-manager-67dd55ff59-dl6ln\" (UID: \"e8b4a268-6430-4f23-bd93-aa62b52710a6\") " pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.205403 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.219184 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.220939 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.238987 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.282764 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-drfrs" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.285379 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tt827\" (UniqueName: \"kubernetes.io/projected/e8b4a268-6430-4f23-bd93-aa62b52710a6-kube-api-access-tt827\") pod \"glance-operator-controller-manager-67dd55ff59-dl6ln\" (UID: \"e8b4a268-6430-4f23-bd93-aa62b52710a6\") " pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.292929 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxpmm\" (UniqueName: \"kubernetes.io/projected/175105c2-dfc2-4752-bf75-a027d86dc373-kube-api-access-wxpmm\") pod \"horizon-operator-controller-manager-77d5c5b54f-82q5c\" (UID: \"175105c2-dfc2-4752-bf75-a027d86dc373\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.293022 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxhbb\" (UniqueName: \"kubernetes.io/projected/76f1d09b-01aa-4c81-b568-8ffb58182475-kube-api-access-rxhbb\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.301416 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.301552 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stmc5\" (UniqueName: \"kubernetes.io/projected/4715fe70-acab-4dea-adde-68e1a6e8cb28-kube-api-access-stmc5\") pod \"ironic-operator-controller-manager-768b776ffb-2cndf\" (UID: \"4715fe70-acab-4dea-adde-68e1a6e8cb28\") " pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" Jan 27 12:47:20 crc kubenswrapper[4900]: E0127 12:47:20.302358 4900 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:20 crc kubenswrapper[4900]: E0127 12:47:20.302479 4900 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert podName:76f1d09b-01aa-4c81-b568-8ffb58182475 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:20.802443069 +0000 UTC m=+1268.039471279 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert") pod "infra-operator-controller-manager-7d75bc88d5-6xhcl" (UID: "76f1d09b-01aa-4c81-b568-8ffb58182475") : secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.338968 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb9qx\" (UniqueName: \"kubernetes.io/projected/5155088c-b873-4fac-b1e9-87f57c2fae68-kube-api-access-jb9qx\") pod \"heat-operator-controller-manager-575ffb885b-wvhzp\" (UID: \"5155088c-b873-4fac-b1e9-87f57c2fae68\") " pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.343702 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.349292 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxhbb\" (UniqueName: \"kubernetes.io/projected/76f1d09b-01aa-4c81-b568-8ffb58182475-kube-api-access-rxhbb\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.386128 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxpmm\" (UniqueName: \"kubernetes.io/projected/175105c2-dfc2-4752-bf75-a027d86dc373-kube-api-access-wxpmm\") pod \"horizon-operator-controller-manager-77d5c5b54f-82q5c\" (UID: \"175105c2-dfc2-4752-bf75-a027d86dc373\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.388793 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.412908 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.440794 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gdbh\" (UniqueName: \"kubernetes.io/projected/e8365f5d-b2f2-4cab-a803-e722c65ae307-kube-api-access-7gdbh\") pod \"keystone-operator-controller-manager-55f684fd56-ns96g\" (UID: \"e8365f5d-b2f2-4cab-a803-e722c65ae307\") " pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.442083 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.445561 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-j2s8h" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.447633 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stmc5\" (UniqueName: \"kubernetes.io/projected/4715fe70-acab-4dea-adde-68e1a6e8cb28-kube-api-access-stmc5\") pod \"ironic-operator-controller-manager-768b776ffb-2cndf\" (UID: \"4715fe70-acab-4dea-adde-68e1a6e8cb28\") " pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.458404 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.464028 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-j66qv" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.469938 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.482830 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.488338 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-dzg6s" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.546398 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8b7s\" (UniqueName: \"kubernetes.io/projected/a6dce274-9090-44fc-ac6b-6e164e5b7192-kube-api-access-z8b7s\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r\" (UID: \"a6dce274-9090-44fc-ac6b-6e164e5b7192\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.547109 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.548848 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bwln\" (UniqueName: \"kubernetes.io/projected/4967ec79-a9dd-438a-9cb7-b89b3af09ff5-kube-api-access-2bwln\") pod \"manila-operator-controller-manager-849fcfbb6b-9m826\" (UID: \"4967ec79-a9dd-438a-9cb7-b89b3af09ff5\") " pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.549028 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gdbh\" (UniqueName: \"kubernetes.io/projected/e8365f5d-b2f2-4cab-a803-e722c65ae307-kube-api-access-7gdbh\") pod \"keystone-operator-controller-manager-55f684fd56-ns96g\" (UID: \"e8365f5d-b2f2-4cab-a803-e722c65ae307\") " pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.569947 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gdbh\" (UniqueName: \"kubernetes.io/projected/e8365f5d-b2f2-4cab-a803-e722c65ae307-kube-api-access-7gdbh\") pod \"keystone-operator-controller-manager-55f684fd56-ns96g\" (UID: \"e8365f5d-b2f2-4cab-a803-e722c65ae307\") " pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.571898 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.589292 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.589339 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.589350 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.636393 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.638380 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.652466 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-pnswr" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.653039 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bwln\" (UniqueName: \"kubernetes.io/projected/4967ec79-a9dd-438a-9cb7-b89b3af09ff5-kube-api-access-2bwln\") pod \"manila-operator-controller-manager-849fcfbb6b-9m826\" (UID: \"4967ec79-a9dd-438a-9cb7-b89b3af09ff5\") " pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.653152 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsb2j\" (UniqueName: \"kubernetes.io/projected/5104a740-a23d-4ea4-a186-97768d490075-kube-api-access-qsb2j\") pod \"neutron-operator-controller-manager-7ffd8d76d4-xfh5t\" (UID: \"5104a740-a23d-4ea4-a186-97768d490075\") " pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.653321 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8b7s\" (UniqueName: \"kubernetes.io/projected/a6dce274-9090-44fc-ac6b-6e164e5b7192-kube-api-access-z8b7s\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r\" (UID: \"a6dce274-9090-44fc-ac6b-6e164e5b7192\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.679136 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.682098 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.690036 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8b7s\" (UniqueName: \"kubernetes.io/projected/a6dce274-9090-44fc-ac6b-6e164e5b7192-kube-api-access-z8b7s\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r\" (UID: \"a6dce274-9090-44fc-ac6b-6e164e5b7192\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.693786 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.704355 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.706281 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.718030 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-l467g" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.727752 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bwln\" (UniqueName: \"kubernetes.io/projected/4967ec79-a9dd-438a-9cb7-b89b3af09ff5-kube-api-access-2bwln\") pod \"manila-operator-controller-manager-849fcfbb6b-9m826\" (UID: \"4967ec79-a9dd-438a-9cb7-b89b3af09ff5\") " pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.732748 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.740907 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.752126 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.761447 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4kgk\" (UniqueName: \"kubernetes.io/projected/a988e8ab-311d-4b6a-a75e-c49601a77d46-kube-api-access-v4kgk\") pod \"nova-operator-controller-manager-fbd766fb6-25hgc\" (UID: \"a988e8ab-311d-4b6a-a75e-c49601a77d46\") " pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.761954 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsb2j\" (UniqueName: \"kubernetes.io/projected/5104a740-a23d-4ea4-a186-97768d490075-kube-api-access-qsb2j\") pod \"neutron-operator-controller-manager-7ffd8d76d4-xfh5t\" (UID: \"5104a740-a23d-4ea4-a186-97768d490075\") " pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.765544 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.786695 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.787615 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.791728 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsb2j\" (UniqueName: \"kubernetes.io/projected/5104a740-a23d-4ea4-a186-97768d490075-kube-api-access-qsb2j\") pod \"neutron-operator-controller-manager-7ffd8d76d4-xfh5t\" (UID: \"5104a740-a23d-4ea4-a186-97768d490075\") " pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.791908 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-6qjg6" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.791953 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.803696 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.807171 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.823299 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.824954 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.838735 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-nlt2t" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.840413 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-zp8x9" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.845789 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.852342 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.856942 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.857627 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.859248 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.864885 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.864950 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrjbr\" (UniqueName: \"kubernetes.io/projected/65b8356b-f64f-4cb8-94af-6b8d45448a63-kube-api-access-lrjbr\") pod \"octavia-operator-controller-manager-7875d7675-j95fk\" (UID: \"65b8356b-f64f-4cb8-94af-6b8d45448a63\") " pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.865044 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xcv6\" (UniqueName: \"kubernetes.io/projected/899811c4-fce0-42df-b3e7-9b1495cad676-kube-api-access-2xcv6\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.865105 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4kgk\" (UniqueName: \"kubernetes.io/projected/a988e8ab-311d-4b6a-a75e-c49601a77d46-kube-api-access-v4kgk\") pod \"nova-operator-controller-manager-fbd766fb6-25hgc\" (UID: \"a988e8ab-311d-4b6a-a75e-c49601a77d46\") " pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.865217 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:20 crc kubenswrapper[4900]: E0127 12:47:20.865451 4900 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:20 crc kubenswrapper[4900]: E0127 12:47:20.865537 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert podName:76f1d09b-01aa-4c81-b568-8ffb58182475 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:21.865495332 +0000 UTC m=+1269.102523542 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert") pod "infra-operator-controller-manager-7d75bc88d5-6xhcl" (UID: "76f1d09b-01aa-4c81-b568-8ffb58182475") : secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.879046 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-z29cp" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.884796 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.895262 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4kgk\" (UniqueName: \"kubernetes.io/projected/a988e8ab-311d-4b6a-a75e-c49601a77d46-kube-api-access-v4kgk\") pod \"nova-operator-controller-manager-fbd766fb6-25hgc\" (UID: \"a988e8ab-311d-4b6a-a75e-c49601a77d46\") " pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.904786 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.920462 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns"] Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.927097 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.947593 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-wckgv" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.969029 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xcv6\" (UniqueName: \"kubernetes.io/projected/899811c4-fce0-42df-b3e7-9b1495cad676-kube-api-access-2xcv6\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.969314 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.969371 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrjbr\" (UniqueName: \"kubernetes.io/projected/65b8356b-f64f-4cb8-94af-6b8d45448a63-kube-api-access-lrjbr\") pod \"octavia-operator-controller-manager-7875d7675-j95fk\" (UID: \"65b8356b-f64f-4cb8-94af-6b8d45448a63\") " pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.969420 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sflpv\" (UniqueName: 
\"kubernetes.io/projected/70b6c48f-4c95-468f-a792-abe4e318948f-kube-api-access-sflpv\") pod \"placement-operator-controller-manager-79d5ccc684-rdllj\" (UID: \"70b6c48f-4c95-468f-a792-abe4e318948f\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.969525 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsz97\" (UniqueName: \"kubernetes.io/projected/0c0782a0-6d83-4760-82dd-cea358647713-kube-api-access-hsz97\") pod \"ovn-operator-controller-manager-6f75f45d54-fkq26\" (UID: \"0c0782a0-6d83-4760-82dd-cea358647713\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.969569 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmxcb\" (UniqueName: \"kubernetes.io/projected/5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7-kube-api-access-lmxcb\") pod \"swift-operator-controller-manager-547cbdb99f-dkghw\" (UID: \"5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" Jan 27 12:47:20 crc kubenswrapper[4900]: E0127 12:47:20.970223 4900 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:20 crc kubenswrapper[4900]: E0127 12:47:20.970275 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert podName:899811c4-fce0-42df-b3e7-9b1495cad676 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:21.470258312 +0000 UTC m=+1268.707286522 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" (UID: "899811c4-fce0-42df-b3e7-9b1495cad676") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:20 crc kubenswrapper[4900]: I0127 12:47:20.979806 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.010414 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xcv6\" (UniqueName: \"kubernetes.io/projected/899811c4-fce0-42df-b3e7-9b1495cad676-kube-api-access-2xcv6\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.017216 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrjbr\" (UniqueName: \"kubernetes.io/projected/65b8356b-f64f-4cb8-94af-6b8d45448a63-kube-api-access-lrjbr\") pod \"octavia-operator-controller-manager-7875d7675-j95fk\" (UID: \"65b8356b-f64f-4cb8-94af-6b8d45448a63\") " pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.034771 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.045013 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.046816 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.052139 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-829cc" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.067787 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.071357 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hntp4\" (UniqueName: \"kubernetes.io/projected/1759ba9c-7c4a-4380-81f5-e67d8e418fa1-kube-api-access-hntp4\") pod \"telemetry-operator-controller-manager-659968c8f5-zkwns\" (UID: \"1759ba9c-7c4a-4380-81f5-e67d8e418fa1\") " pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.071503 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sflpv\" (UniqueName: \"kubernetes.io/projected/70b6c48f-4c95-468f-a792-abe4e318948f-kube-api-access-sflpv\") pod \"placement-operator-controller-manager-79d5ccc684-rdllj\" (UID: \"70b6c48f-4c95-468f-a792-abe4e318948f\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.071592 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsz97\" (UniqueName: \"kubernetes.io/projected/0c0782a0-6d83-4760-82dd-cea358647713-kube-api-access-hsz97\") pod \"ovn-operator-controller-manager-6f75f45d54-fkq26\" (UID: \"0c0782a0-6d83-4760-82dd-cea358647713\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.071636 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmxcb\" (UniqueName: \"kubernetes.io/projected/5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7-kube-api-access-lmxcb\") pod \"swift-operator-controller-manager-547cbdb99f-dkghw\" (UID: \"5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.078549 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.082417 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.085349 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.096263 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-fs5zb" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.097018 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsz97\" (UniqueName: \"kubernetes.io/projected/0c0782a0-6d83-4760-82dd-cea358647713-kube-api-access-hsz97\") pod \"ovn-operator-controller-manager-6f75f45d54-fkq26\" (UID: \"0c0782a0-6d83-4760-82dd-cea358647713\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.097049 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sflpv\" (UniqueName: \"kubernetes.io/projected/70b6c48f-4c95-468f-a792-abe4e318948f-kube-api-access-sflpv\") pod \"placement-operator-controller-manager-79d5ccc684-rdllj\" (UID: \"70b6c48f-4c95-468f-a792-abe4e318948f\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.097936 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmxcb\" (UniqueName: \"kubernetes.io/projected/5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7-kube-api-access-lmxcb\") pod \"swift-operator-controller-manager-547cbdb99f-dkghw\" (UID: \"5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.140989 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.171494 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" event={"ID":"1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc","Type":"ContainerStarted","Data":"ef011e3855a1367f27fa352ecc0d03738491247432d4ca9470aefe0566160a3d"} Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.174288 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p5s9\" (UniqueName: \"kubernetes.io/projected/be0258a0-aba9-4900-b507-4767b2726a69-kube-api-access-8p5s9\") pod \"watcher-operator-controller-manager-7579fb95dd-5zsrz\" (UID: \"be0258a0-aba9-4900-b507-4767b2726a69\") " pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.174372 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hntp4\" (UniqueName: \"kubernetes.io/projected/1759ba9c-7c4a-4380-81f5-e67d8e418fa1-kube-api-access-hntp4\") pod \"telemetry-operator-controller-manager-659968c8f5-zkwns\" (UID: \"1759ba9c-7c4a-4380-81f5-e67d8e418fa1\") " pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.174468 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccblw\" (UniqueName: \"kubernetes.io/projected/5d4cc48d-12ab-458e-bf29-bc87a182f5c3-kube-api-access-ccblw\") pod \"test-operator-controller-manager-69797bbcbd-krlqc\" (UID: \"5d4cc48d-12ab-458e-bf29-bc87a182f5c3\") " 
pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.180003 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.203643 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.205831 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.213656 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hntp4\" (UniqueName: \"kubernetes.io/projected/1759ba9c-7c4a-4380-81f5-e67d8e418fa1-kube-api-access-hntp4\") pod \"telemetry-operator-controller-manager-659968c8f5-zkwns\" (UID: \"1759ba9c-7c4a-4380-81f5-e67d8e418fa1\") " pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.214313 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.214908 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.214996 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-7ht8p" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.215405 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.220942 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.261682 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.268773 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.276701 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-k6779" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.279099 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p5s9\" (UniqueName: \"kubernetes.io/projected/be0258a0-aba9-4900-b507-4767b2726a69-kube-api-access-8p5s9\") pod \"watcher-operator-controller-manager-7579fb95dd-5zsrz\" (UID: \"be0258a0-aba9-4900-b507-4767b2726a69\") " pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.279291 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.279365 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccblw\" (UniqueName: \"kubernetes.io/projected/5d4cc48d-12ab-458e-bf29-bc87a182f5c3-kube-api-access-ccblw\") pod \"test-operator-controller-manager-69797bbcbd-krlqc\" (UID: \"5d4cc48d-12ab-458e-bf29-bc87a182f5c3\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.279520 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swwxr\" (UniqueName: \"kubernetes.io/projected/e08773f7-5eaf-4a76-b671-0681c02a3471-kube-api-access-swwxr\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.279601 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.283048 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.304376 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.316007 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccblw\" (UniqueName: \"kubernetes.io/projected/5d4cc48d-12ab-458e-bf29-bc87a182f5c3-kube-api-access-ccblw\") pod \"test-operator-controller-manager-69797bbcbd-krlqc\" (UID: \"5d4cc48d-12ab-458e-bf29-bc87a182f5c3\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.327974 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p5s9\" (UniqueName: \"kubernetes.io/projected/be0258a0-aba9-4900-b507-4767b2726a69-kube-api-access-8p5s9\") pod \"watcher-operator-controller-manager-7579fb95dd-5zsrz\" (UID: \"be0258a0-aba9-4900-b507-4767b2726a69\") " pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.339388 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.382843 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f8c2\" (UniqueName: \"kubernetes.io/projected/a9e9714e-647d-42a9-9073-1cbd72a6b647-kube-api-access-5f8c2\") pod \"rabbitmq-cluster-operator-manager-668c99d594-92ln6\" (UID: \"a9e9714e-647d-42a9-9073-1cbd72a6b647\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.383222 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.383591 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swwxr\" (UniqueName: \"kubernetes.io/projected/e08773f7-5eaf-4a76-b671-0681c02a3471-kube-api-access-swwxr\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.383630 4900 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.383747 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:21.88371761 +0000 UTC m=+1269.120745820 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "metrics-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.383787 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.384210 4900 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.384238 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:21.884230835 +0000 UTC m=+1269.121259045 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "webhook-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.400635 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.419723 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swwxr\" (UniqueName: \"kubernetes.io/projected/e08773f7-5eaf-4a76-b671-0681c02a3471-kube-api-access-swwxr\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.449289 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.478464 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.489671 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.491589 4900 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.491715 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert podName:899811c4-fce0-42df-b3e7-9b1495cad676 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:22.491678903 +0000 UTC m=+1269.728707113 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" (UID: "899811c4-fce0-42df-b3e7-9b1495cad676") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.493731 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f8c2\" (UniqueName: \"kubernetes.io/projected/a9e9714e-647d-42a9-9073-1cbd72a6b647-kube-api-access-5f8c2\") pod \"rabbitmq-cluster-operator-manager-668c99d594-92ln6\" (UID: \"a9e9714e-647d-42a9-9073-1cbd72a6b647\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.529653 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f8c2\" (UniqueName: \"kubernetes.io/projected/a9e9714e-647d-42a9-9073-1cbd72a6b647-kube-api-access-5f8c2\") pod \"rabbitmq-cluster-operator-manager-668c99d594-92ln6\" (UID: \"a9e9714e-647d-42a9-9073-1cbd72a6b647\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.532197 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.564990 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.581335 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh"] Jan 27 12:47:21 crc kubenswrapper[4900]: W0127 12:47:21.776504 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8b4a268_6430_4f23_bd93_aa62b52710a6.slice/crio-00b0a11ee9cdb19d15a0fab268fe2a698a86c0ff6ec53d9cf1cffed7a06854d9 WatchSource:0}: Error finding container 00b0a11ee9cdb19d15a0fab268fe2a698a86c0ff6ec53d9cf1cffed7a06854d9: Status 404 returned error can't find the container with id 00b0a11ee9cdb19d15a0fab268fe2a698a86c0ff6ec53d9cf1cffed7a06854d9 Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.776973 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.794179 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp"] Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.907584 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.907654 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.907795 4900 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: I0127 12:47:21.907869 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.907887 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:22.907860739 +0000 UTC m=+1270.144888959 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "webhook-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.908086 4900 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.908127 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:22.908115807 +0000 UTC m=+1270.145144017 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "metrics-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.908178 4900 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:21 crc kubenswrapper[4900]: E0127 12:47:21.908199 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert podName:76f1d09b-01aa-4c81-b568-8ffb58182475 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:23.908191269 +0000 UTC m=+1271.145219479 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert") pod "infra-operator-controller-manager-7d75bc88d5-6xhcl" (UID: "76f1d09b-01aa-4c81-b568-8ffb58182475") : secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.228438 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" event={"ID":"e8b4a268-6430-4f23-bd93-aa62b52710a6","Type":"ContainerStarted","Data":"00b0a11ee9cdb19d15a0fab268fe2a698a86c0ff6ec53d9cf1cffed7a06854d9"} Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.235182 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" event={"ID":"e3cba13f-5396-4c71-8f81-d2d932baca1f","Type":"ContainerStarted","Data":"ead1c7fbd74c41e6b9dd027b77b700a9cbb13daa6539529ef7b024d2d9b56fd1"} Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.247372 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" event={"ID":"b094071d-c368-40e6-8515-a17d0a22a868","Type":"ContainerStarted","Data":"780ba688e8dfae4688a736db048f1622bc2a928ab169897b50703f9565522521"} Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.252712 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" event={"ID":"5155088c-b873-4fac-b1e9-87f57c2fae68","Type":"ContainerStarted","Data":"0b96364bb4392e05b546d9582fedbcdb3239cf548c72400d266f4cc75b212cf3"} Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.440552 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t"] Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.524745 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:22 crc kubenswrapper[4900]: E0127 12:47:22.524990 4900 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:22 crc kubenswrapper[4900]: E0127 12:47:22.525153 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert podName:899811c4-fce0-42df-b3e7-9b1495cad676 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:24.52511767 +0000 UTC m=+1271.762145880 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" (UID: "899811c4-fce0-42df-b3e7-9b1495cad676") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.600711 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c"] Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.600762 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g"] Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.600776 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r"] Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.600788 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826"] Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.600801 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc"] Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.600811 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf"] Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.608436 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk"] Jan 27 12:47:22 crc kubenswrapper[4900]: W0127 12:47:22.611259 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8365f5d_b2f2_4cab_a803_e722c65ae307.slice/crio-8a74bade799655a470895ddca0c27bda5a73103108c7694d7489815b667a4651 WatchSource:0}: Error finding container 8a74bade799655a470895ddca0c27bda5a73103108c7694d7489815b667a4651: Status 404 returned error can't find the container with id 8a74bade799655a470895ddca0c27bda5a73103108c7694d7489815b667a4651 Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.617316 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj"] Jan 27 12:47:22 crc kubenswrapper[4900]: W0127 12:47:22.632600 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4967ec79_a9dd_438a_9cb7_b89b3af09ff5.slice/crio-a5c66496e06c3a1edcaa077f6733ed4c8a7b6d0375af1eb2acdeae48a1380ca2 WatchSource:0}: Error finding container a5c66496e06c3a1edcaa077f6733ed4c8a7b6d0375af1eb2acdeae48a1380ca2: Status 404 returned error can't find the container with id a5c66496e06c3a1edcaa077f6733ed4c8a7b6d0375af1eb2acdeae48a1380ca2 Jan 27 12:47:22 crc kubenswrapper[4900]: W0127 12:47:22.634611 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4715fe70_acab_4dea_adde_68e1a6e8cb28.slice/crio-5612703c1b1444d9f52ea31cbb7a674fc27a244c07e58144058f22a46cd6259f WatchSource:0}: Error finding container 5612703c1b1444d9f52ea31cbb7a674fc27a244c07e58144058f22a46cd6259f: Status 404 returned error can't find the container with id 5612703c1b1444d9f52ea31cbb7a674fc27a244c07e58144058f22a46cd6259f Jan 27 12:47:22 crc kubenswrapper[4900]: W0127 12:47:22.634803 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70b6c48f_4c95_468f_a792_abe4e318948f.slice/crio-fa5e2e60250c5c36d2bd38a2de3c318f8488c9fc2474949f778d70d1a0ff4687 WatchSource:0}: Error finding container fa5e2e60250c5c36d2bd38a2de3c318f8488c9fc2474949f778d70d1a0ff4687: Status 404 returned error can't find the container with id fa5e2e60250c5c36d2bd38a2de3c318f8488c9fc2474949f778d70d1a0ff4687 Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.936293 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:22 crc kubenswrapper[4900]: I0127 12:47:22.936870 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:22 crc kubenswrapper[4900]: E0127 12:47:22.937109 4900 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 12:47:22 crc kubenswrapper[4900]: E0127 12:47:22.937199 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:24.937164117 +0000 UTC m=+1272.174192327 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "webhook-server-cert" not found Jan 27 12:47:22 crc kubenswrapper[4900]: E0127 12:47:22.937891 4900 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 12:47:22 crc kubenswrapper[4900]: E0127 12:47:22.937934 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:24.937923689 +0000 UTC m=+1272.174951899 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "metrics-server-cert" not found Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.001716 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns"] Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.004800 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw"] Jan 27 12:47:23 crc kubenswrapper[4900]: W0127 12:47:23.005632 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d74e7b5_7dcd_4edc_9b82_1dea0a9570a7.slice/crio-51b7cf832129aa8b368fa5e725bdd9d37950838b1758a6f88274ae16e7794739 WatchSource:0}: Error finding container 51b7cf832129aa8b368fa5e725bdd9d37950838b1758a6f88274ae16e7794739: Status 404 returned error can't find the container with id 51b7cf832129aa8b368fa5e725bdd9d37950838b1758a6f88274ae16e7794739 Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.027755 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ccblw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-krlqc_openstack-operators(5d4cc48d-12ab-458e-bf29-bc87a182f5c3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.028858 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" podUID="5d4cc48d-12ab-458e-bf29-bc87a182f5c3" Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.033296 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26"] Jan 27 12:47:23 crc kubenswrapper[4900]: W0127 12:47:23.039765 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe0258a0_aba9_4900_b507_4767b2726a69.slice/crio-ab1950caa57497e3ed7c3f856fc9344a45f3f25e96517cecffa2d26d402991f0 WatchSource:0}: Error finding container ab1950caa57497e3ed7c3f856fc9344a45f3f25e96517cecffa2d26d402991f0: Status 404 returned error can't find the container with id ab1950caa57497e3ed7c3f856fc9344a45f3f25e96517cecffa2d26d402991f0 Jan 27 12:47:23 crc kubenswrapper[4900]: W0127 12:47:23.047160 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9e9714e_647d_42a9_9073_1cbd72a6b647.slice/crio-188ebe623298aeb94bcc01290267b30e42e77c7b438ebc35ea474721e654087a WatchSource:0}: Error finding container 188ebe623298aeb94bcc01290267b30e42e77c7b438ebc35ea474721e654087a: Status 404 returned error can't find the container with id 188ebe623298aeb94bcc01290267b30e42e77c7b438ebc35ea474721e654087a Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.047529 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/watcher-operator@sha256:52883b60ff78eccd14e5f0d0730e1f0c1af7c0a212c19e0b0797bd63c2dc2a87,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8p5s9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-7579fb95dd-5zsrz_openstack-operators(be0258a0-aba9-4900-b507-4767b2726a69): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.049075 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" podUID="be0258a0-aba9-4900-b507-4767b2726a69" Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.052605 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz"] Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.054726 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5f8c2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-92ln6_openstack-operators(a9e9714e-647d-42a9-9073-1cbd72a6b647): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.055944 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" podUID="a9e9714e-647d-42a9-9073-1cbd72a6b647" Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.062750 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc"] Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.071632 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6"] Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.265610 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" event={"ID":"5104a740-a23d-4ea4-a186-97768d490075","Type":"ContainerStarted","Data":"5831cd62ca2b3af1f516962cfe6ee2d84258d59207d4391ed1cd25b1a2da9760"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.268194 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" event={"ID":"a988e8ab-311d-4b6a-a75e-c49601a77d46","Type":"ContainerStarted","Data":"9e0cba92d636c5e76f6587b37797dc8cc52d48100d1b57a849d9d4cf0dc0ba25"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.270825 4900 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" event={"ID":"70b6c48f-4c95-468f-a792-abe4e318948f","Type":"ContainerStarted","Data":"fa5e2e60250c5c36d2bd38a2de3c318f8488c9fc2474949f778d70d1a0ff4687"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.274730 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" event={"ID":"e8365f5d-b2f2-4cab-a803-e722c65ae307","Type":"ContainerStarted","Data":"8a74bade799655a470895ddca0c27bda5a73103108c7694d7489815b667a4651"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.279210 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" event={"ID":"5d4cc48d-12ab-458e-bf29-bc87a182f5c3","Type":"ContainerStarted","Data":"d270e14ee6756b2c22ec385d31188ef8d636a04e60cea8bdb8544affdfa152fa"} Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.281371 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" podUID="5d4cc48d-12ab-458e-bf29-bc87a182f5c3" Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.281738 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" event={"ID":"175105c2-dfc2-4752-bf75-a027d86dc373","Type":"ContainerStarted","Data":"48e223b6c4a76a6ff1521527308e9c397f1522099cb88595536f5a234fa23c23"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.285998 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" event={"ID":"a9e9714e-647d-42a9-9073-1cbd72a6b647","Type":"ContainerStarted","Data":"188ebe623298aeb94bcc01290267b30e42e77c7b438ebc35ea474721e654087a"} Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.290601 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" podUID="a9e9714e-647d-42a9-9073-1cbd72a6b647" Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.291632 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" event={"ID":"1759ba9c-7c4a-4380-81f5-e67d8e418fa1","Type":"ContainerStarted","Data":"e3bb2a4346e7a213c223776eb0d4a5f79b0dd8db47ae7fb3e40dd3376808429d"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.297004 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" event={"ID":"5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7","Type":"ContainerStarted","Data":"51b7cf832129aa8b368fa5e725bdd9d37950838b1758a6f88274ae16e7794739"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.306481 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" 
event={"ID":"65b8356b-f64f-4cb8-94af-6b8d45448a63","Type":"ContainerStarted","Data":"a019b092b8b0b4ff0a6b6e7e707edd1a069a3a0a94be547b1a08affd310701a8"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.311687 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" event={"ID":"4967ec79-a9dd-438a-9cb7-b89b3af09ff5","Type":"ContainerStarted","Data":"a5c66496e06c3a1edcaa077f6733ed4c8a7b6d0375af1eb2acdeae48a1380ca2"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.315174 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" event={"ID":"be0258a0-aba9-4900-b507-4767b2726a69","Type":"ContainerStarted","Data":"ab1950caa57497e3ed7c3f856fc9344a45f3f25e96517cecffa2d26d402991f0"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.317320 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" event={"ID":"4715fe70-acab-4dea-adde-68e1a6e8cb28","Type":"ContainerStarted","Data":"5612703c1b1444d9f52ea31cbb7a674fc27a244c07e58144058f22a46cd6259f"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.319252 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" event={"ID":"a6dce274-9090-44fc-ac6b-6e164e5b7192","Type":"ContainerStarted","Data":"ef967b971cc194481c357bd235f76fa98c77ad25dfa606c511ac7c4592c2e947"} Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.319499 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/watcher-operator@sha256:52883b60ff78eccd14e5f0d0730e1f0c1af7c0a212c19e0b0797bd63c2dc2a87\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" podUID="be0258a0-aba9-4900-b507-4767b2726a69" Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.321293 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" event={"ID":"0c0782a0-6d83-4760-82dd-cea358647713","Type":"ContainerStarted","Data":"d13fc91d9efe80187d4efd027475ee22986853b626828921d3caaa25e80cd76d"} Jan 27 12:47:23 crc kubenswrapper[4900]: I0127 12:47:23.976558 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.976899 4900 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:23 crc kubenswrapper[4900]: E0127 12:47:23.977065 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert podName:76f1d09b-01aa-4c81-b568-8ffb58182475 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:27.977019382 +0000 UTC m=+1275.214047752 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert") pod "infra-operator-controller-manager-7d75bc88d5-6xhcl" (UID: "76f1d09b-01aa-4c81-b568-8ffb58182475") : secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.338247 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" podUID="a9e9714e-647d-42a9-9073-1cbd72a6b647" Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.338727 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/watcher-operator@sha256:52883b60ff78eccd14e5f0d0730e1f0c1af7c0a212c19e0b0797bd63c2dc2a87\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" podUID="be0258a0-aba9-4900-b507-4767b2726a69" Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.338797 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" podUID="5d4cc48d-12ab-458e-bf29-bc87a182f5c3" Jan 27 12:47:24 crc kubenswrapper[4900]: I0127 12:47:24.588806 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.589816 4900 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.589968 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert podName:899811c4-fce0-42df-b3e7-9b1495cad676 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:28.589927877 +0000 UTC m=+1275.826956117 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" (UID: "899811c4-fce0-42df-b3e7-9b1495cad676") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:24 crc kubenswrapper[4900]: I0127 12:47:24.998240 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.998516 4900 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 12:47:24 crc kubenswrapper[4900]: I0127 12:47:24.998546 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.998666 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:28.998621806 +0000 UTC m=+1276.235650006 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "webhook-server-cert" not found Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.998820 4900 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 12:47:24 crc kubenswrapper[4900]: E0127 12:47:24.998957 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:28.998914165 +0000 UTC m=+1276.235942555 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "metrics-server-cert" not found Jan 27 12:47:28 crc kubenswrapper[4900]: I0127 12:47:28.064200 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:28 crc kubenswrapper[4900]: E0127 12:47:28.065187 4900 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:28 crc kubenswrapper[4900]: E0127 12:47:28.065257 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert podName:76f1d09b-01aa-4c81-b568-8ffb58182475 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:36.065238699 +0000 UTC m=+1283.302266909 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert") pod "infra-operator-controller-manager-7d75bc88d5-6xhcl" (UID: "76f1d09b-01aa-4c81-b568-8ffb58182475") : secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:28 crc kubenswrapper[4900]: I0127 12:47:28.676809 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:28 crc kubenswrapper[4900]: E0127 12:47:28.677081 4900 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:28 crc kubenswrapper[4900]: E0127 12:47:28.677192 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert podName:899811c4-fce0-42df-b3e7-9b1495cad676 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:36.677166476 +0000 UTC m=+1283.914194686 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" (UID: "899811c4-fce0-42df-b3e7-9b1495cad676") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:29 crc kubenswrapper[4900]: I0127 12:47:29.085282 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:29 crc kubenswrapper[4900]: I0127 12:47:29.085420 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:29 crc kubenswrapper[4900]: E0127 12:47:29.085564 4900 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 12:47:29 crc kubenswrapper[4900]: E0127 12:47:29.085600 4900 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 12:47:29 crc kubenswrapper[4900]: E0127 12:47:29.085723 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:37.085691609 +0000 UTC m=+1284.322719879 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "webhook-server-cert" not found Jan 27 12:47:29 crc kubenswrapper[4900]: E0127 12:47:29.085810 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:37.085779362 +0000 UTC m=+1284.322807662 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "metrics-server-cert" not found Jan 27 12:47:36 crc kubenswrapper[4900]: I0127 12:47:36.147280 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:36 crc kubenswrapper[4900]: E0127 12:47:36.148327 4900 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:36 crc kubenswrapper[4900]: E0127 12:47:36.148414 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert podName:76f1d09b-01aa-4c81-b568-8ffb58182475 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:52.148388086 +0000 UTC m=+1299.385416296 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert") pod "infra-operator-controller-manager-7d75bc88d5-6xhcl" (UID: "76f1d09b-01aa-4c81-b568-8ffb58182475") : secret "infra-operator-webhook-server-cert" not found Jan 27 12:47:36 crc kubenswrapper[4900]: E0127 12:47:36.323197 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/glance-operator@sha256:bc45409dff26aca6bd982684cfaf093548adb6a71928f5257fe60ab5535dda39" Jan 27 12:47:36 crc kubenswrapper[4900]: E0127 12:47:36.323567 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/glance-operator@sha256:bc45409dff26aca6bd982684cfaf093548adb6a71928f5257fe60ab5535dda39,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tt827,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-67dd55ff59-dl6ln_openstack-operators(e8b4a268-6430-4f23-bd93-aa62b52710a6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:36 crc kubenswrapper[4900]: E0127 12:47:36.324807 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" Jan 27 12:47:36 crc kubenswrapper[4900]: E0127 12:47:36.501387 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/glance-operator@sha256:bc45409dff26aca6bd982684cfaf093548adb6a71928f5257fe60ab5535dda39\\\"\"" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" Jan 27 12:47:36 crc kubenswrapper[4900]: I0127 12:47:36.760602 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:36 crc kubenswrapper[4900]: E0127 12:47:36.760835 4900 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:36 crc kubenswrapper[4900]: E0127 12:47:36.760934 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert podName:899811c4-fce0-42df-b3e7-9b1495cad676 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:52.76090826 +0000 UTC m=+1299.997936470 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" (UID: "899811c4-fce0-42df-b3e7-9b1495cad676") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 27 12:47:37 crc kubenswrapper[4900]: I0127 12:47:37.169249 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:37 crc kubenswrapper[4900]: I0127 12:47:37.169514 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:37 crc kubenswrapper[4900]: E0127 12:47:37.169533 4900 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 27 12:47:37 crc kubenswrapper[4900]: E0127 12:47:37.169663 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:53.1696339 +0000 UTC m=+1300.406662120 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "webhook-server-cert" not found Jan 27 12:47:37 crc kubenswrapper[4900]: E0127 12:47:37.169749 4900 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 27 12:47:37 crc kubenswrapper[4900]: E0127 12:47:37.169839 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs podName:e08773f7-5eaf-4a76-b671-0681c02a3471 nodeName:}" failed. No retries permitted until 2026-01-27 12:47:53.169815915 +0000 UTC m=+1300.406844165 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs") pod "openstack-operator-controller-manager-7c8f46b9cc-4h24l" (UID: "e08773f7-5eaf-4a76-b671-0681c02a3471") : secret "metrics-server-cert" not found Jan 27 12:47:38 crc kubenswrapper[4900]: E0127 12:47:38.095951 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:fa46fc14710961e6b4a76a3522dca3aa3cfa71436c7cf7ade533d3712822f327" Jan 27 12:47:38 crc kubenswrapper[4900]: E0127 12:47:38.096655 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:fa46fc14710961e6b4a76a3522dca3aa3cfa71436c7cf7ade533d3712822f327,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hsz97,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-6f75f45d54-fkq26_openstack-operators(0c0782a0-6d83-4760-82dd-cea358647713): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:38 crc kubenswrapper[4900]: E0127 12:47:38.098150 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" 
podUID="0c0782a0-6d83-4760-82dd-cea358647713" Jan 27 12:47:38 crc kubenswrapper[4900]: E0127 12:47:38.518833 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:fa46fc14710961e6b4a76a3522dca3aa3cfa71436c7cf7ade533d3712822f327\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" podUID="0c0782a0-6d83-4760-82dd-cea358647713" Jan 27 12:47:38 crc kubenswrapper[4900]: E0127 12:47:38.777993 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:b673f00227298dcfa89abb46f8296a0825add42da41e8a4bf4dd13367c738d84" Jan 27 12:47:38 crc kubenswrapper[4900]: E0127 12:47:38.778531 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:b673f00227298dcfa89abb46f8296a0825add42da41e8a4bf4dd13367c738d84,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z8b7s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r_openstack-operators(a6dce274-9090-44fc-ac6b-6e164e5b7192): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:38 crc kubenswrapper[4900]: E0127 12:47:38.779758 4900 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podUID="a6dce274-9090-44fc-ac6b-6e164e5b7192" Jan 27 12:47:39 crc kubenswrapper[4900]: E0127 12:47:39.535851 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:b673f00227298dcfa89abb46f8296a0825add42da41e8a4bf4dd13367c738d84\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podUID="a6dce274-9090-44fc-ac6b-6e164e5b7192" Jan 27 12:47:39 crc kubenswrapper[4900]: E0127 12:47:39.877951 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/neutron-operator@sha256:14786c3a66c41213a03d6375c03209f22d439dd6e752317ddcbe21dda66bb569" Jan 27 12:47:39 crc kubenswrapper[4900]: E0127 12:47:39.878251 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/neutron-operator@sha256:14786c3a66c41213a03d6375c03209f22d439dd6e752317ddcbe21dda66bb569,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qsb2j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-7ffd8d76d4-xfh5t_openstack-operators(5104a740-a23d-4ea4-a186-97768d490075): ErrImagePull: rpc error: code = 
Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:39 crc kubenswrapper[4900]: E0127 12:47:39.879412 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" podUID="5104a740-a23d-4ea4-a186-97768d490075" Jan 27 12:47:40 crc kubenswrapper[4900]: E0127 12:47:40.542149 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/neutron-operator@sha256:14786c3a66c41213a03d6375c03209f22d439dd6e752317ddcbe21dda66bb569\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" podUID="5104a740-a23d-4ea4-a186-97768d490075" Jan 27 12:47:42 crc kubenswrapper[4900]: E0127 12:47:42.410229 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/cinder-operator@sha256:7619b8e8814c4d22fcdcc392cdaba2ce279d356fc9263275c91acfba86533591" Jan 27 12:47:42 crc kubenswrapper[4900]: E0127 12:47:42.410868 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/cinder-operator@sha256:7619b8e8814c4d22fcdcc392cdaba2ce279d356fc9263275c91acfba86533591,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qmk66,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-655bf9cfbb-7wh5z_openstack-operators(1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:42 crc kubenswrapper[4900]: E0127 12:47:42.412000 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" podUID="1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc" Jan 27 12:47:42 crc kubenswrapper[4900]: E0127 12:47:42.558324 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/cinder-operator@sha256:7619b8e8814c4d22fcdcc392cdaba2ce279d356fc9263275c91acfba86533591\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" podUID="1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc" Jan 27 12:47:43 crc kubenswrapper[4900]: E0127 12:47:43.200353 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922" Jan 27 12:47:43 crc kubenswrapper[4900]: E0127 12:47:43.200666 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lmxcb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-547cbdb99f-dkghw_openstack-operators(5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:43 crc kubenswrapper[4900]: E0127 12:47:43.203084 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" podUID="5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7" Jan 27 12:47:43 crc kubenswrapper[4900]: E0127 12:47:43.571260 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" podUID="5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7" Jan 27 12:47:43 crc kubenswrapper[4900]: E0127 12:47:43.771519 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/ironic-operator@sha256:30e2224475338d3a02d617ae147dc7dc09867cce4ac3543b313a1923c46299fa" Jan 27 12:47:43 crc kubenswrapper[4900]: E0127 12:47:43.771747 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/ironic-operator@sha256:30e2224475338d3a02d617ae147dc7dc09867cce4ac3543b313a1923c46299fa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-stmc5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-768b776ffb-2cndf_openstack-operators(4715fe70-acab-4dea-adde-68e1a6e8cb28): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:43 crc kubenswrapper[4900]: E0127 12:47:43.772925 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" Jan 27 12:47:44 crc kubenswrapper[4900]: E0127 12:47:44.580588 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/ironic-operator@sha256:30e2224475338d3a02d617ae147dc7dc09867cce4ac3543b313a1923c46299fa\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" Jan 27 12:47:45 crc kubenswrapper[4900]: E0127 12:47:45.816350 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/manila-operator@sha256:82feceb236aaeae01761b172c94173d2624fe12feeb76a18c8aa2a664bafaf84" Jan 27 12:47:45 crc kubenswrapper[4900]: E0127 12:47:45.817044 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/manila-operator@sha256:82feceb236aaeae01761b172c94173d2624fe12feeb76a18c8aa2a664bafaf84,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2bwln,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-849fcfbb6b-9m826_openstack-operators(4967ec79-a9dd-438a-9cb7-b89b3af09ff5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:45 crc kubenswrapper[4900]: E0127 12:47:45.818469 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podUID="4967ec79-a9dd-438a-9cb7-b89b3af09ff5" Jan 27 12:47:46 crc kubenswrapper[4900]: E0127 12:47:46.598970 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/manila-operator@sha256:82feceb236aaeae01761b172c94173d2624fe12feeb76a18c8aa2a664bafaf84\\\"\"" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podUID="4967ec79-a9dd-438a-9cb7-b89b3af09ff5" Jan 27 12:47:47 crc kubenswrapper[4900]: E0127 12:47:47.399602 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.153:5001/openstack-k8s-operators/telemetry-operator:8bd837c8c2e9e101738b6d034950bde14118a832" Jan 27 12:47:47 crc kubenswrapper[4900]: E0127 12:47:47.399732 4900 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: 
context canceled" image="38.102.83.153:5001/openstack-k8s-operators/telemetry-operator:8bd837c8c2e9e101738b6d034950bde14118a832" Jan 27 12:47:47 crc kubenswrapper[4900]: E0127 12:47:47.399914 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.153:5001/openstack-k8s-operators/telemetry-operator:8bd837c8c2e9e101738b6d034950bde14118a832,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hntp4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-659968c8f5-zkwns_openstack-operators(1759ba9c-7c4a-4380-81f5-e67d8e418fa1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:47 crc kubenswrapper[4900]: E0127 12:47:47.401113 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podUID="1759ba9c-7c4a-4380-81f5-e67d8e418fa1" Jan 27 12:47:47 crc kubenswrapper[4900]: E0127 12:47:47.605974 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.153:5001/openstack-k8s-operators/telemetry-operator:8bd837c8c2e9e101738b6d034950bde14118a832\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podUID="1759ba9c-7c4a-4380-81f5-e67d8e418fa1" 
Jan 27 12:47:50 crc kubenswrapper[4900]: E0127 12:47:50.330218 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/keystone-operator@sha256:008a2e338430e7dd513f81f66320cc5c1332c332a3191b537d75786489d7f487" Jan 27 12:47:50 crc kubenswrapper[4900]: E0127 12:47:50.331286 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/keystone-operator@sha256:008a2e338430e7dd513f81f66320cc5c1332c332a3191b537d75786489d7f487,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7gdbh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-55f684fd56-ns96g_openstack-operators(e8365f5d-b2f2-4cab-a803-e722c65ae307): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:50 crc kubenswrapper[4900]: E0127 12:47:50.332978 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podUID="e8365f5d-b2f2-4cab-a803-e722c65ae307" Jan 27 12:47:50 crc kubenswrapper[4900]: E0127 12:47:50.653644 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/lmiccini/keystone-operator@sha256:008a2e338430e7dd513f81f66320cc5c1332c332a3191b537d75786489d7f487\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podUID="e8365f5d-b2f2-4cab-a803-e722c65ae307" Jan 27 12:47:51 crc kubenswrapper[4900]: E0127 12:47:51.078534 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/nova-operator@sha256:123ea3339f27822e161e5fa113f4c3ecbd8348533cf3067b43ebf32874eb46cc" Jan 27 12:47:51 crc kubenswrapper[4900]: E0127 12:47:51.078870 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/nova-operator@sha256:123ea3339f27822e161e5fa113f4c3ecbd8348533cf3067b43ebf32874eb46cc,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-v4kgk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-fbd766fb6-25hgc_openstack-operators(a988e8ab-311d-4b6a-a75e-c49601a77d46): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:51 crc kubenswrapper[4900]: E0127 12:47:51.080868 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" podUID="a988e8ab-311d-4b6a-a75e-c49601a77d46" Jan 27 12:47:51 crc kubenswrapper[4900]: E0127 
12:47:51.444322 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 27 12:47:51 crc kubenswrapper[4900]: E0127 12:47:51.444823 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5f8c2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-92ln6_openstack-operators(a9e9714e-647d-42a9-9073-1cbd72a6b647): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:47:51 crc kubenswrapper[4900]: E0127 12:47:51.446311 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" podUID="a9e9714e-647d-42a9-9073-1cbd72a6b647" Jan 27 12:47:51 crc kubenswrapper[4900]: E0127 12:47:51.660960 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/nova-operator@sha256:123ea3339f27822e161e5fa113f4c3ecbd8348533cf3067b43ebf32874eb46cc\\\"\"" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" podUID="a988e8ab-311d-4b6a-a75e-c49601a77d46" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.205497 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.213115 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/76f1d09b-01aa-4c81-b568-8ffb58182475-cert\") pod \"infra-operator-controller-manager-7d75bc88d5-6xhcl\" (UID: \"76f1d09b-01aa-4c81-b568-8ffb58182475\") " pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.221851 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.688678 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" event={"ID":"5155088c-b873-4fac-b1e9-87f57c2fae68","Type":"ContainerStarted","Data":"954f1472a4332cf69bf437bcbddcec111a538ca0aa87548ca7a46082e473e36b"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.689036 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.716464 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" event={"ID":"e3cba13f-5396-4c71-8f81-d2d932baca1f","Type":"ContainerStarted","Data":"ac250a9ede14ee079e40247a74aff76b65f426829c7efcbd10b4c8ceaebcb267"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.717462 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.746116 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" podStartSLOduration=8.717569767 podStartE2EDuration="33.746081831s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:21.804293324 +0000 UTC m=+1269.041321534" lastFinishedPulling="2026-01-27 12:47:46.832805388 +0000 UTC m=+1294.069833598" observedRunningTime="2026-01-27 12:47:52.728961222 +0000 UTC m=+1299.965989432" watchObservedRunningTime="2026-01-27 12:47:52.746081831 +0000 UTC m=+1299.983110041" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.750623 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" event={"ID":"65b8356b-f64f-4cb8-94af-6b8d45448a63","Type":"ContainerStarted","Data":"023b30fd30a32705b07854dda9fb615c97252c2ef1b0e8cbe13ba9ff5ee63997"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.750724 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.756120 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" 
event={"ID":"e8b4a268-6430-4f23-bd93-aa62b52710a6","Type":"ContainerStarted","Data":"d9338ed33200c9e065ccb79cea4c9223e9f3ac49cafc9ed6ccca4e1a23fc523e"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.759161 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.766216 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" event={"ID":"5d4cc48d-12ab-458e-bf29-bc87a182f5c3","Type":"ContainerStarted","Data":"943c7fb6ef06ff71bab49858a9541a8146ce54b545050fcf2083bebe7ef9e5eb"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.766429 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" podStartSLOduration=9.527346443 podStartE2EDuration="33.766395182s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:21.56749486 +0000 UTC m=+1268.804523070" lastFinishedPulling="2026-01-27 12:47:45.806543599 +0000 UTC m=+1293.043571809" observedRunningTime="2026-01-27 12:47:52.757572685 +0000 UTC m=+1299.994600915" watchObservedRunningTime="2026-01-27 12:47:52.766395182 +0000 UTC m=+1300.003423392" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.768844 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.775952 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" event={"ID":"be0258a0-aba9-4900-b507-4767b2726a69","Type":"ContainerStarted","Data":"bd87e5c0415491c3bbf838bc8bf853e32163b78b16baeb857a3b4a277c2fdf7b"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.776190 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.781209 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" event={"ID":"175105c2-dfc2-4752-bf75-a027d86dc373","Type":"ContainerStarted","Data":"3991ff7a492ad9878add28f9d7275d00148926f916bc88b48bde93aa19add8a4"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.781385 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.787394 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" event={"ID":"5104a740-a23d-4ea4-a186-97768d490075","Type":"ContainerStarted","Data":"98fbeba64ca4e585158d7ef6430f1bf7824cb06161df1ae61412aa4fd3a5de12"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.787729 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.789480 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" 
event={"ID":"70b6c48f-4c95-468f-a792-abe4e318948f","Type":"ContainerStarted","Data":"e84bc809ad2225f49cef532836e817c8e475bec9cdd9478baab66de849df233d"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.789598 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.794594 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" event={"ID":"b094071d-c368-40e6-8515-a17d0a22a868","Type":"ContainerStarted","Data":"e92fd0c987ba28a32e5f7ecac1a9a26af100dd7f9283f562e7083588da812bdc"} Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.799955 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.807538 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podStartSLOduration=4.061149577 podStartE2EDuration="33.807492689s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:21.78391397 +0000 UTC m=+1269.020942170" lastFinishedPulling="2026-01-27 12:47:51.530257062 +0000 UTC m=+1298.767285282" observedRunningTime="2026-01-27 12:47:52.780503513 +0000 UTC m=+1300.017531743" watchObservedRunningTime="2026-01-27 12:47:52.807492689 +0000 UTC m=+1300.044520899" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.849977 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.855394 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" podStartSLOduration=7.288398161 podStartE2EDuration="32.855353812s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.633526127 +0000 UTC m=+1269.870554337" lastFinishedPulling="2026-01-27 12:47:48.200481778 +0000 UTC m=+1295.437509988" observedRunningTime="2026-01-27 12:47:52.830475958 +0000 UTC m=+1300.067504178" watchObservedRunningTime="2026-01-27 12:47:52.855353812 +0000 UTC m=+1300.092382022" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.857287 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/899811c4-fce0-42df-b3e7-9b1495cad676-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85462bq5\" (UID: \"899811c4-fce0-42df-b3e7-9b1495cad676\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.876303 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" podStartSLOduration=8.285165407 podStartE2EDuration="33.876271041s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.609283141 +0000 UTC m=+1269.846311361" 
lastFinishedPulling="2026-01-27 12:47:48.200388775 +0000 UTC m=+1295.437416995" observedRunningTime="2026-01-27 12:47:52.849505682 +0000 UTC m=+1300.086533892" watchObservedRunningTime="2026-01-27 12:47:52.876271041 +0000 UTC m=+1300.113299251" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.924127 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" podStartSLOduration=4.494749736 podStartE2EDuration="32.924090473s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:23.047264133 +0000 UTC m=+1270.284292343" lastFinishedPulling="2026-01-27 12:47:51.47660487 +0000 UTC m=+1298.713633080" observedRunningTime="2026-01-27 12:47:52.901728872 +0000 UTC m=+1300.138757082" watchObservedRunningTime="2026-01-27 12:47:52.924090473 +0000 UTC m=+1300.161118683" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.947988 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.948661 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" podStartSLOduration=3.46792268 podStartE2EDuration="32.948632568s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.477396461 +0000 UTC m=+1269.714424671" lastFinishedPulling="2026-01-27 12:47:51.958106349 +0000 UTC m=+1299.195134559" observedRunningTime="2026-01-27 12:47:52.935135665 +0000 UTC m=+1300.172163885" watchObservedRunningTime="2026-01-27 12:47:52.948632568 +0000 UTC m=+1300.185660778" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.970294 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" podStartSLOduration=4.50586337 podStartE2EDuration="32.970264888s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:23.027384614 +0000 UTC m=+1270.264412824" lastFinishedPulling="2026-01-27 12:47:51.491786132 +0000 UTC m=+1298.728814342" observedRunningTime="2026-01-27 12:47:52.961524073 +0000 UTC m=+1300.198552283" watchObservedRunningTime="2026-01-27 12:47:52.970264888 +0000 UTC m=+1300.207293098" Jan 27 12:47:52 crc kubenswrapper[4900]: I0127 12:47:52.997211 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" podStartSLOduration=7.44293118 podStartE2EDuration="32.997174121s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.646224386 +0000 UTC m=+1269.883252596" lastFinishedPulling="2026-01-27 12:47:48.200467327 +0000 UTC m=+1295.437495537" observedRunningTime="2026-01-27 12:47:52.989427996 +0000 UTC m=+1300.226456206" watchObservedRunningTime="2026-01-27 12:47:52.997174121 +0000 UTC m=+1300.234202331" Jan 27 12:47:53 crc kubenswrapper[4900]: I0127 12:47:53.027987 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" podStartSLOduration=7.422687377 podStartE2EDuration="34.027958657s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:21.595255579 +0000 UTC m=+1268.832283789" 
lastFinishedPulling="2026-01-27 12:47:48.200526859 +0000 UTC m=+1295.437555069" observedRunningTime="2026-01-27 12:47:53.02151366 +0000 UTC m=+1300.258541870" watchObservedRunningTime="2026-01-27 12:47:53.027958657 +0000 UTC m=+1300.264986867" Jan 27 12:47:53 crc kubenswrapper[4900]: I0127 12:47:53.129241 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl"] Jan 27 12:47:53 crc kubenswrapper[4900]: I0127 12:47:53.256801 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:53 crc kubenswrapper[4900]: I0127 12:47:53.256933 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:53 crc kubenswrapper[4900]: I0127 12:47:53.262112 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-metrics-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:53 crc kubenswrapper[4900]: I0127 12:47:53.269218 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e08773f7-5eaf-4a76-b671-0681c02a3471-webhook-certs\") pod \"openstack-operator-controller-manager-7c8f46b9cc-4h24l\" (UID: \"e08773f7-5eaf-4a76-b671-0681c02a3471\") " pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:53 crc kubenswrapper[4900]: I0127 12:47:53.306845 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:56 crc kubenswrapper[4900]: W0127 12:47:56.541022 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76f1d09b_01aa_4c81_b568_8ffb58182475.slice/crio-ff5a2f7aca2c95628057dd493044f6e227d91ae2adc65b6a08eedd06d505c174 WatchSource:0}: Error finding container ff5a2f7aca2c95628057dd493044f6e227d91ae2adc65b6a08eedd06d505c174: Status 404 returned error can't find the container with id ff5a2f7aca2c95628057dd493044f6e227d91ae2adc65b6a08eedd06d505c174 Jan 27 12:47:56 crc kubenswrapper[4900]: I0127 12:47:56.850025 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" event={"ID":"76f1d09b-01aa-4c81-b568-8ffb58182475","Type":"ContainerStarted","Data":"ff5a2f7aca2c95628057dd493044f6e227d91ae2adc65b6a08eedd06d505c174"} Jan 27 12:47:57 crc kubenswrapper[4900]: I0127 12:47:57.332293 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5"] Jan 27 12:47:57 crc kubenswrapper[4900]: W0127 12:47:57.337053 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod899811c4_fce0_42df_b3e7_9b1495cad676.slice/crio-0d36fa294b79f7dcef3abb44f21a8dc1666d0dc0d6b4c44c781d37f6a2f95020 WatchSource:0}: Error finding container 0d36fa294b79f7dcef3abb44f21a8dc1666d0dc0d6b4c44c781d37f6a2f95020: Status 404 returned error can't find the container with id 0d36fa294b79f7dcef3abb44f21a8dc1666d0dc0d6b4c44c781d37f6a2f95020 Jan 27 12:47:57 crc kubenswrapper[4900]: I0127 12:47:57.424779 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l"] Jan 27 12:47:57 crc kubenswrapper[4900]: W0127 12:47:57.427185 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode08773f7_5eaf_4a76_b671_0681c02a3471.slice/crio-65a9d7c9714ba3865d06956d6014929494e6a5859cca3ce279427bbb21a04772 WatchSource:0}: Error finding container 65a9d7c9714ba3865d06956d6014929494e6a5859cca3ce279427bbb21a04772: Status 404 returned error can't find the container with id 65a9d7c9714ba3865d06956d6014929494e6a5859cca3ce279427bbb21a04772 Jan 27 12:47:57 crc kubenswrapper[4900]: I0127 12:47:57.861028 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" event={"ID":"e08773f7-5eaf-4a76-b671-0681c02a3471","Type":"ContainerStarted","Data":"65a9d7c9714ba3865d06956d6014929494e6a5859cca3ce279427bbb21a04772"} Jan 27 12:47:57 crc kubenswrapper[4900]: I0127 12:47:57.862965 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" event={"ID":"899811c4-fce0-42df-b3e7-9b1495cad676","Type":"ContainerStarted","Data":"0d36fa294b79f7dcef3abb44f21a8dc1666d0dc0d6b4c44c781d37f6a2f95020"} Jan 27 12:47:58 crc kubenswrapper[4900]: I0127 12:47:58.899696 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" event={"ID":"e08773f7-5eaf-4a76-b671-0681c02a3471","Type":"ContainerStarted","Data":"34a9594c6ae90d00461e21b81bedad93da2bc761e8d58e4762f7d4aa27511d4a"} Jan 27 
12:47:58 crc kubenswrapper[4900]: I0127 12:47:58.902713 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:47:58 crc kubenswrapper[4900]: I0127 12:47:58.947790 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podStartSLOduration=38.947761969 podStartE2EDuration="38.947761969s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:47:58.941501877 +0000 UTC m=+1306.178530087" watchObservedRunningTime="2026-01-27 12:47:58.947761969 +0000 UTC m=+1306.184790179" Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.934807 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" event={"ID":"a6dce274-9090-44fc-ac6b-6e164e5b7192","Type":"ContainerStarted","Data":"4970da8afe29292c201ee826011d88c5919d06bb4b38483275131c1cda4bb336"} Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.935633 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.942592 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" event={"ID":"4715fe70-acab-4dea-adde-68e1a6e8cb28","Type":"ContainerStarted","Data":"770ef99116ff54db2e42dde3f75299b068985901bdb7d934924122cf9029012c"} Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.942921 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.945779 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" event={"ID":"0c0782a0-6d83-4760-82dd-cea358647713","Type":"ContainerStarted","Data":"376abb96cffb053d016377b5ed6da29858093424d1be04d17724790a9c153b62"} Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.946098 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.948763 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" event={"ID":"1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc","Type":"ContainerStarted","Data":"ca6eff7f1f22a60ccaf0efff42fe528cfaab535d2cb29df5a16c8e6c91e17881"} Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.949991 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.954102 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" event={"ID":"5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7","Type":"ContainerStarted","Data":"ab36d077f7f7cdc804ad3fecd4314cec0f20c10bfde3e05e4b540e999624ba94"} Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.965208 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podStartSLOduration=3.9419230499999998 podStartE2EDuration="39.965177061s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.598976031 +0000 UTC m=+1269.836004241" lastFinishedPulling="2026-01-27 12:47:58.622230042 +0000 UTC m=+1305.859258252" observedRunningTime="2026-01-27 12:47:59.959182297 +0000 UTC m=+1307.196210507" watchObservedRunningTime="2026-01-27 12:47:59.965177061 +0000 UTC m=+1307.202205271" Jan 27 12:47:59 crc kubenswrapper[4900]: I0127 12:47:59.983811 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" podStartSLOduration=4.289455809 podStartE2EDuration="39.983778203s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:23.013888511 +0000 UTC m=+1270.250916721" lastFinishedPulling="2026-01-27 12:47:58.708210905 +0000 UTC m=+1305.945239115" observedRunningTime="2026-01-27 12:47:59.979444836 +0000 UTC m=+1307.216473066" watchObservedRunningTime="2026-01-27 12:47:59.983778203 +0000 UTC m=+1307.220806413" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.019520 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" podStartSLOduration=3.421724951 podStartE2EDuration="41.019476712s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:21.023470611 +0000 UTC m=+1268.260498821" lastFinishedPulling="2026-01-27 12:47:58.621222372 +0000 UTC m=+1305.858250582" observedRunningTime="2026-01-27 12:48:00.004620489 +0000 UTC m=+1307.241648699" watchObservedRunningTime="2026-01-27 12:48:00.019476712 +0000 UTC m=+1307.256504932" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.051560 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podStartSLOduration=4.906445058 podStartE2EDuration="41.051474124s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.658924346 +0000 UTC m=+1269.895952556" lastFinishedPulling="2026-01-27 12:47:58.803953412 +0000 UTC m=+1306.040981622" observedRunningTime="2026-01-27 12:48:00.043124501 +0000 UTC m=+1307.280152711" watchObservedRunningTime="2026-01-27 12:48:00.051474124 +0000 UTC m=+1307.288502334" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.080654 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" podStartSLOduration=4.004946125 podStartE2EDuration="40.080609512s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:23.009673898 +0000 UTC m=+1270.246702108" lastFinishedPulling="2026-01-27 12:47:59.085337285 +0000 UTC m=+1306.322365495" observedRunningTime="2026-01-27 12:48:00.066663566 +0000 UTC m=+1307.303691766" watchObservedRunningTime="2026-01-27 12:48:00.080609512 +0000 UTC m=+1307.317637722" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.211650 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.449755 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.566635 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.574540 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.684901 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" Jan 27 12:48:00 crc kubenswrapper[4900]: I0127 12:48:00.863467 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" Jan 27 12:48:01 crc kubenswrapper[4900]: I0127 12:48:01.036398 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" event={"ID":"4967ec79-a9dd-438a-9cb7-b89b3af09ff5","Type":"ContainerStarted","Data":"3df16b4b82c3dfc84c617bf2b7bd0aae46b048fd7ddd4d9048d29c9896a95fe9"} Jan 27 12:48:01 crc kubenswrapper[4900]: I0127 12:48:01.066700 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podStartSLOduration=3.672048593 podStartE2EDuration="41.06666877s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.632804796 +0000 UTC m=+1269.869833006" lastFinishedPulling="2026-01-27 12:48:00.027424973 +0000 UTC m=+1307.264453183" observedRunningTime="2026-01-27 12:48:01.05741407 +0000 UTC m=+1308.294442300" watchObservedRunningTime="2026-01-27 12:48:01.06666877 +0000 UTC m=+1308.303696980" Jan 27 12:48:01 crc kubenswrapper[4900]: I0127 12:48:01.081758 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" Jan 27 12:48:01 crc kubenswrapper[4900]: I0127 12:48:01.225223 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" Jan 27 12:48:01 crc kubenswrapper[4900]: I0127 12:48:01.305575 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" Jan 27 12:48:01 crc kubenswrapper[4900]: I0127 12:48:01.412016 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" Jan 27 12:48:01 crc kubenswrapper[4900]: I0127 12:48:01.464412 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" Jan 27 12:48:02 crc kubenswrapper[4900]: E0127 12:48:02.485204 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" podUID="a9e9714e-647d-42a9-9073-1cbd72a6b647" Jan 27 12:48:03 crc kubenswrapper[4900]: I0127 
12:48:03.313251 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.064359 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" event={"ID":"e8365f5d-b2f2-4cab-a803-e722c65ae307","Type":"ContainerStarted","Data":"1c50ef9ffa44b457912f3657d06434126302622c370a6f754ca08944352679ae"} Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.064855 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.065953 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" event={"ID":"899811c4-fce0-42df-b3e7-9b1495cad676","Type":"ContainerStarted","Data":"f6f5f302e7703bf610746eb0aefa198f328bdd65009e6ebf2fb182c19e425e5f"} Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.066100 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.067861 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" event={"ID":"76f1d09b-01aa-4c81-b568-8ffb58182475","Type":"ContainerStarted","Data":"11ffe4fdbf45feed2cc89339c55b726a151c623cf9b120ddda2ab9a2d4dd1355"} Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.067921 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.069773 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" event={"ID":"1759ba9c-7c4a-4380-81f5-e67d8e418fa1","Type":"ContainerStarted","Data":"00e70f70795450c9e4770e1318c91ee7fe1a8efcbdc07dc21e5060e409dbf39f"} Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.070403 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.086829 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podStartSLOduration=4.146284446 podStartE2EDuration="45.086796542s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.617966064 +0000 UTC m=+1269.854994274" lastFinishedPulling="2026-01-27 12:48:03.55847816 +0000 UTC m=+1310.795506370" observedRunningTime="2026-01-27 12:48:04.085219796 +0000 UTC m=+1311.322248006" watchObservedRunningTime="2026-01-27 12:48:04.086796542 +0000 UTC m=+1311.323824762" Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.118345 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podStartSLOduration=3.5658456 podStartE2EDuration="44.118312469s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:23.008889625 +0000 UTC m=+1270.245917835" lastFinishedPulling="2026-01-27 
12:48:03.561356494 +0000 UTC m=+1310.798384704" observedRunningTime="2026-01-27 12:48:04.117382332 +0000 UTC m=+1311.354410542" watchObservedRunningTime="2026-01-27 12:48:04.118312469 +0000 UTC m=+1311.355340679" Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.137393 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" podStartSLOduration=38.127280097 podStartE2EDuration="45.137360824s" podCreationTimestamp="2026-01-27 12:47:19 +0000 UTC" firstStartedPulling="2026-01-27 12:47:56.550953818 +0000 UTC m=+1303.787982028" lastFinishedPulling="2026-01-27 12:48:03.561034545 +0000 UTC m=+1310.798062755" observedRunningTime="2026-01-27 12:48:04.136416117 +0000 UTC m=+1311.373444327" watchObservedRunningTime="2026-01-27 12:48:04.137360824 +0000 UTC m=+1311.374389034" Jan 27 12:48:04 crc kubenswrapper[4900]: I0127 12:48:04.181612 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podStartSLOduration=37.964037059 podStartE2EDuration="44.181566281s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:57.340985989 +0000 UTC m=+1304.578014189" lastFinishedPulling="2026-01-27 12:48:03.558515201 +0000 UTC m=+1310.795543411" observedRunningTime="2026-01-27 12:48:04.174096594 +0000 UTC m=+1311.411124804" watchObservedRunningTime="2026-01-27 12:48:04.181566281 +0000 UTC m=+1311.418594491" Jan 27 12:48:07 crc kubenswrapper[4900]: I0127 12:48:07.112425 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" event={"ID":"a988e8ab-311d-4b6a-a75e-c49601a77d46","Type":"ContainerStarted","Data":"f8bffeb7b34bbe589752569b56a4a2c4e4f407c4e437d5472daa1d9b7d70a8c5"} Jan 27 12:48:07 crc kubenswrapper[4900]: I0127 12:48:07.113434 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" Jan 27 12:48:07 crc kubenswrapper[4900]: I0127 12:48:07.138482 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" podStartSLOduration=2.858368142 podStartE2EDuration="47.138434269s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:22.63364718 +0000 UTC m=+1269.870675380" lastFinishedPulling="2026-01-27 12:48:06.913713297 +0000 UTC m=+1314.150741507" observedRunningTime="2026-01-27 12:48:07.130806317 +0000 UTC m=+1314.367834557" watchObservedRunningTime="2026-01-27 12:48:07.138434269 +0000 UTC m=+1314.375462479" Jan 27 12:48:10 crc kubenswrapper[4900]: I0127 12:48:10.167724 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" Jan 27 12:48:10 crc kubenswrapper[4900]: I0127 12:48:10.698336 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" Jan 27 12:48:10 crc kubenswrapper[4900]: I0127 12:48:10.747002 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" Jan 27 12:48:10 crc kubenswrapper[4900]: I0127 12:48:10.755879 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" Jan 27 12:48:10 crc kubenswrapper[4900]: I0127 12:48:10.854349 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" Jan 27 12:48:10 crc kubenswrapper[4900]: I0127 12:48:10.857434 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" Jan 27 12:48:11 crc kubenswrapper[4900]: I0127 12:48:11.184232 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" Jan 27 12:48:11 crc kubenswrapper[4900]: I0127 12:48:11.308941 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" Jan 27 12:48:11 crc kubenswrapper[4900]: I0127 12:48:11.497435 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" Jan 27 12:48:12 crc kubenswrapper[4900]: I0127 12:48:12.227847 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" Jan 27 12:48:12 crc kubenswrapper[4900]: I0127 12:48:12.956080 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 12:48:18 crc kubenswrapper[4900]: I0127 12:48:18.212836 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" event={"ID":"a9e9714e-647d-42a9-9073-1cbd72a6b647","Type":"ContainerStarted","Data":"9a0dd5433536ad1e539a57bbac1cbf6fd6ae9062b09affae2884affbdc4c5bc5"} Jan 27 12:48:18 crc kubenswrapper[4900]: I0127 12:48:18.235902 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-92ln6" podStartSLOduration=5.153061173 podStartE2EDuration="58.235857606s" podCreationTimestamp="2026-01-27 12:47:20 +0000 UTC" firstStartedPulling="2026-01-27 12:47:23.054477643 +0000 UTC m=+1270.291505863" lastFinishedPulling="2026-01-27 12:48:16.137274086 +0000 UTC m=+1323.374302296" observedRunningTime="2026-01-27 12:48:18.23462741 +0000 UTC m=+1325.471655610" watchObservedRunningTime="2026-01-27 12:48:18.235857606 +0000 UTC m=+1325.472885816" Jan 27 12:48:21 crc kubenswrapper[4900]: I0127 12:48:21.038877 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" Jan 27 12:48:22 crc kubenswrapper[4900]: I0127 12:48:22.372550 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:48:22 crc kubenswrapper[4900]: I0127 12:48:22.372680 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.392776 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b5p78"]
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.396673 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.424715 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5p78"]
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.432249 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-utilities\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.432378 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-catalog-content\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.432490 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xrfq\" (UniqueName: \"kubernetes.io/projected/1344dba3-f0d4-468c-ad6b-1653648f6017-kube-api-access-6xrfq\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.534688 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-utilities\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.534863 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xrfq\" (UniqueName: \"kubernetes.io/projected/1344dba3-f0d4-468c-ad6b-1653648f6017-kube-api-access-6xrfq\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.534892 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-catalog-content\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.536022 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-catalog-content\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.536280 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-utilities\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.558596 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xrfq\" (UniqueName: \"kubernetes.io/projected/1344dba3-f0d4-468c-ad6b-1653648f6017-kube-api-access-6xrfq\") pod \"redhat-operators-b5p78\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:32 crc kubenswrapper[4900]: I0127 12:48:32.721633 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:33 crc kubenswrapper[4900]: I0127 12:48:33.223429 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5p78"]
Jan 27 12:48:33 crc kubenswrapper[4900]: I0127 12:48:33.349765 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5p78" event={"ID":"1344dba3-f0d4-468c-ad6b-1653648f6017","Type":"ContainerStarted","Data":"645fdfbcce36732996306a98206ec33247b5fc1ef3fd2b32ad2b8239c2cae71b"}
Jan 27 12:48:34 crc kubenswrapper[4900]: I0127 12:48:34.360049 4900 generic.go:334] "Generic (PLEG): container finished" podID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerID="780e66f5685639a9c3fcdf06e1e97c102d2ca0d6a15d6ea21db96a2e31d9a197" exitCode=0
Jan 27 12:48:34 crc kubenswrapper[4900]: I0127 12:48:34.360433 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5p78" event={"ID":"1344dba3-f0d4-468c-ad6b-1653648f6017","Type":"ContainerDied","Data":"780e66f5685639a9c3fcdf06e1e97c102d2ca0d6a15d6ea21db96a2e31d9a197"}
Jan 27 12:48:34 crc kubenswrapper[4900]: I0127 12:48:34.363409 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 12:48:35 crc kubenswrapper[4900]: I0127 12:48:35.370903 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5p78" event={"ID":"1344dba3-f0d4-468c-ad6b-1653648f6017","Type":"ContainerStarted","Data":"2e8fdd581fcba7b2893113318b5cdf7cd75b33f2584d345e8a2dd922865e5ba5"}
Jan 27 12:48:35 crc kubenswrapper[4900]: I0127 12:48:35.942124 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-64kbp"]
Jan 27 12:48:35 crc kubenswrapper[4900]: I0127 12:48:35.944025 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp"
Jan 27 12:48:35 crc kubenswrapper[4900]: I0127 12:48:35.950873 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Jan 27 12:48:35 crc kubenswrapper[4900]: I0127 12:48:35.951302 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Jan 27 12:48:35 crc kubenswrapper[4900]: I0127 12:48:35.951578 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-92sdf"
Jan 27 12:48:35 crc kubenswrapper[4900]: I0127 12:48:35.951737 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Jan 27 12:48:35 crc kubenswrapper[4900]: I0127 12:48:35.961963 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-64kbp"]
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.058543 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gf2v2"]
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.067944 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.076677 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.108374 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gf2v2"]
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.114346 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xflwh\" (UniqueName: \"kubernetes.io/projected/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-kube-api-access-xflwh\") pod \"dnsmasq-dns-675f4bcbfc-64kbp\" (UID: \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\") " pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.114475 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-config\") pod \"dnsmasq-dns-675f4bcbfc-64kbp\" (UID: \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\") " pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.216470 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-config\") pod \"dnsmasq-dns-675f4bcbfc-64kbp\" (UID: \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\") " pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.216864 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-442jh\" (UniqueName: \"kubernetes.io/projected/2f1bb41c-4126-4f2a-95a0-917dec713e9d-kube-api-access-442jh\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.216956 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.217093 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xflwh\" (UniqueName: \"kubernetes.io/projected/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-kube-api-access-xflwh\") pod \"dnsmasq-dns-675f4bcbfc-64kbp\" (UID: \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\") " pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.217132 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-config\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.218352 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-config\") pod \"dnsmasq-dns-675f4bcbfc-64kbp\" (UID: \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\") " pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.241610 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xflwh\" (UniqueName: \"kubernetes.io/projected/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-kube-api-access-xflwh\") pod \"dnsmasq-dns-675f4bcbfc-64kbp\" (UID: \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\") " pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.280765 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.319608 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.319751 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-config\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.319851 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-442jh\" (UniqueName: \"kubernetes.io/projected/2f1bb41c-4126-4f2a-95a0-917dec713e9d-kube-api-access-442jh\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.320905 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.320905 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-config\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.341983 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-442jh\" (UniqueName: \"kubernetes.io/projected/2f1bb41c-4126-4f2a-95a0-917dec713e9d-kube-api-access-442jh\") pod \"dnsmasq-dns-78dd6ddcc-gf2v2\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.412012 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2"
Jan 27 12:48:36 crc kubenswrapper[4900]: W0127 12:48:36.834506 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea7300b4_1cbb_4f83_8ac3_668d188d0e8b.slice/crio-93af817d2fd516f53e13e05408003794cd5ffda9b3617bb31a8492f1c780a661 WatchSource:0}: Error finding container 93af817d2fd516f53e13e05408003794cd5ffda9b3617bb31a8492f1c780a661: Status 404 returned error can't find the container with id 93af817d2fd516f53e13e05408003794cd5ffda9b3617bb31a8492f1c780a661
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.838493 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-64kbp"]
Jan 27 12:48:36 crc kubenswrapper[4900]: I0127 12:48:36.978025 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gf2v2"]
Jan 27 12:48:37 crc kubenswrapper[4900]: I0127 12:48:37.390908 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2" event={"ID":"2f1bb41c-4126-4f2a-95a0-917dec713e9d","Type":"ContainerStarted","Data":"13ad07709ebbc07eb19329e4f3a48cc897eee033a73ea2de261dbe94cd6a1eca"}
Jan 27 12:48:37 crc kubenswrapper[4900]: I0127 12:48:37.392730 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp" event={"ID":"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b","Type":"ContainerStarted","Data":"93af817d2fd516f53e13e05408003794cd5ffda9b3617bb31a8492f1c780a661"}
Jan 27 12:48:38 crc kubenswrapper[4900]: I0127 12:48:38.406305 4900 generic.go:334] "Generic (PLEG): container finished" podID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerID="2e8fdd581fcba7b2893113318b5cdf7cd75b33f2584d345e8a2dd922865e5ba5" exitCode=0
Jan 27 12:48:38 crc kubenswrapper[4900]: I0127 12:48:38.406368 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5p78" event={"ID":"1344dba3-f0d4-468c-ad6b-1653648f6017","Type":"ContainerDied","Data":"2e8fdd581fcba7b2893113318b5cdf7cd75b33f2584d345e8a2dd922865e5ba5"}
Jan 27 12:48:38 crc kubenswrapper[4900]: I0127 12:48:38.973285 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-64kbp"]
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.001509 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-f9tqz"]
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.003779 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.029488 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-f9tqz"]
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.194348 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-config\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.194521 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.194599 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghx8b\" (UniqueName: \"kubernetes.io/projected/4d02f684-ed10-4d14-8a81-fea1453f4b2a-kube-api-access-ghx8b\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.297172 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.297297 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghx8b\" (UniqueName: \"kubernetes.io/projected/4d02f684-ed10-4d14-8a81-fea1453f4b2a-kube-api-access-ghx8b\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.297382 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-config\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.298381 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.299622 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-config\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.351709 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gf2v2"]
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.371298 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghx8b\" (UniqueName: \"kubernetes.io/projected/4d02f684-ed10-4d14-8a81-fea1453f4b2a-kube-api-access-ghx8b\") pod \"dnsmasq-dns-666b6646f7-f9tqz\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.409825 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wh4rj"]
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.412457 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.546442 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wh4rj"]
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.612602 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.612753 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-config\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.612796 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjm9f\" (UniqueName: \"kubernetes.io/projected/d8dcc12a-564d-412b-b443-70036eab30a7-kube-api-access-jjm9f\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.636672 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.715107 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.715176 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-config\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.715209 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjm9f\" (UniqueName: \"kubernetes.io/projected/d8dcc12a-564d-412b-b443-70036eab30a7-kube-api-access-jjm9f\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.716913 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-config\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.717119 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.745843 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjm9f\" (UniqueName: \"kubernetes.io/projected/d8dcc12a-564d-412b-b443-70036eab30a7-kube-api-access-jjm9f\") pod \"dnsmasq-dns-57d769cc4f-wh4rj\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:39 crc kubenswrapper[4900]: I0127 12:48:39.767771 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.155449 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.162921 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.171553 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.171592 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.171551 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.171815 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.171893 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-t2hwg"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.172026 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.172372 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.188984 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.192023 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.194390 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.222549 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.225633 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.260112 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.307449 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.344617 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.344956 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.345133 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-config-data\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.345247 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.345366 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-server-conf\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.345462 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.345583 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-pod-info\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.345700 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.345823 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7db8t\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-kube-api-access-7db8t\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.345924 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.346157 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448602 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448727 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448766 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448796 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448821 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448842 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448867 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-config-data\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448931 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448951 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.448982 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449026 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449047 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-server-conf\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449095 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449126 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-pod-info\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449141 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca415683-2d53-4bdc-b9f7-c98610a65cc3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449159 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-pod-info\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449186 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkhpw\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-kube-api-access-qkhpw\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449209 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449237 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449263 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449283 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7db8t\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-kube-api-access-7db8t\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449325 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449374 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449403 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca415683-2d53-4bdc-b9f7-c98610a65cc3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449432 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449480 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpp2r\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-kube-api-access-jpp2r\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449502 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-server-conf\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449517 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449598 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449620 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-config-data\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449646 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-config-data\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449670 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.449704 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.450377 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.452677 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.453781 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-config-data\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.454853 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-server-conf\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.455275 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.465968 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-f9tqz"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.472306 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-pod-info\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.482860 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.487863 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.488712 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.537863 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.537986 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0e710899f4a916659691db79ca451e0bb173d45a0872809026b858f8d492b434/globalmount\"" pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.553336 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7db8t\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-kube-api-access-7db8t\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564297 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564438 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564460 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564482 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564521 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564534 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564612 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564667 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-pod-info\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564687 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca415683-2d53-4bdc-b9f7-c98610a65cc3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564764 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkhpw\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-kube-api-access-qkhpw\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564822 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.564901 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565013 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565035 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca415683-2d53-4bdc-b9f7-c98610a65cc3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565095 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565118 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpp2r\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-kube-api-access-jpp2r\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565148 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-server-conf\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565162 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565229 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-config-data\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565258 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565272 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-config-data\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.565304 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.571248 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wh4rj"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.572640 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.573806 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-config-data\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.573956 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.574662 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.575812 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.584753 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-config-data\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.585480 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.586651 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.587049 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.588281 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-server-conf\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.601564 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.609211 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.609269 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/30662d0a0a67567137a827b81cb6827625c105288f56960d31edd50967af9ef2/globalmount\"" pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.609992 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.610017 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a28b60198351fffe37bd5f288973b6a03f7909d229e7151274d0b3aa1c852f26/globalmount\"" pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.616912 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.617784 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.619165 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-pod-info\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.622404 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.633902 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca415683-2d53-4bdc-b9f7-c98610a65cc3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.634869 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.643683 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca415683-2d53-4bdc-b9f7-c98610a65cc3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.644284 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5p78" event={"ID":"1344dba3-f0d4-468c-ad6b-1653648f6017","Type":"ContainerStarted","Data":"cbb57461ade85b8e9900dfb37df333dc4636221e9c9ccb3353807fb9047e2891"}
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.644344 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.645606 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpp2r\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-kube-api-access-jpp2r\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.652474 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkhpw\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-kube-api-access-qkhpw\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.662651 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" event={"ID":"4d02f684-ed10-4d14-8a81-fea1453f4b2a","Type":"ContainerStarted","Data":"b4ca81df630b7e4fcd5538d8b4c0eac766af730cafeede9658244daf6dfb64fb"}
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.662781 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.662881 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.663816 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" event={"ID":"d8dcc12a-564d-412b-b443-70036eab30a7","Type":"ContainerStarted","Data":"eda0c825ddee17b68a706e39edbce2571083d7032cfee5736b42823d830a8ba5"}
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.666940 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.667169 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-fszws"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.667356 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.667565 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.668142 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.672361 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.672458 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.684190 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b5p78" podStartSLOduration=3.583483253 podStartE2EDuration="8.684048835s" podCreationTimestamp="2026-01-27 12:48:32 +0000 UTC" firstStartedPulling="2026-01-27 12:48:34.363171504 +0000 UTC m=+1341.600199714" lastFinishedPulling="2026-01-27 12:48:39.463737076 +0000 UTC m=+1346.700765296" observedRunningTime="2026-01-27 12:48:40.682668494 +0000 UTC m=+1347.919696724" watchObservedRunningTime="2026-01-27 12:48:40.684048835 +0000 UTC m=+1347.921077045"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.750660 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"rabbitmq-server-2\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.777400 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"rabbitmq-server-1\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " pod="openstack/rabbitmq-server-1"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.795113 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"rabbitmq-server-0\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.826198 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.864345 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.876967 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878018 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878206 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c4594c71-599f-4576-bf95-303da1436ca4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878312 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878491 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID:
\"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878589 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c4594c71-599f-4576-bf95-303da1436ca4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878619 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878669 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878844 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878880 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgfkj\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-kube-api-access-lgfkj\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.878926 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.894002 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980359 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980480 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980518 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980585 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c4594c71-599f-4576-bf95-303da1436ca4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980642 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980667 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980699 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c4594c71-599f-4576-bf95-303da1436ca4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980773 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980815 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc 
kubenswrapper[4900]: I0127 12:48:40.980888 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.980917 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgfkj\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-kube-api-access-lgfkj\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.982499 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.983164 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.983491 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.983990 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.984025 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5a6b33fd526ee85a4a24fe8833d231aad71709cc2cd25719d061ac882771bde1/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.985248 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.985443 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c4594c71-599f-4576-bf95-303da1436ca4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.986927 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.989605 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:40 crc kubenswrapper[4900]: I0127 12:48:40.991540 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:41 crc kubenswrapper[4900]: I0127 12:48:41.007895 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgfkj\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-kube-api-access-lgfkj\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:41 crc kubenswrapper[4900]: I0127 12:48:41.026786 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c4594c71-599f-4576-bf95-303da1436ca4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:41 crc kubenswrapper[4900]: I0127 12:48:41.068481 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"rabbitmq-cell1-server-0\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.297349 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.893523 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.896802 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.910944 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.911445 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.913665 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.913683 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.922144 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:41.926457 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-xr7j4"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.032725 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7626ad91-9f29-4dae-969a-e23d420319ac-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.033406 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.033498 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7626ad91-9f29-4dae-969a-e23d420319ac-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.033624 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-config-data-default\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.033735 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bn8cl\" (UniqueName: \"kubernetes.io/projected/7626ad91-9f29-4dae-969a-e23d420319ac-kube-api-access-bn8cl\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.033933 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c29c84cd-bbfe-4a6b-97c3-4a9318a9b58f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c29c84cd-bbfe-4a6b-97c3-4a9318a9b58f\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.034025 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-kolla-config\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.034084 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7626ad91-9f29-4dae-969a-e23d420319ac-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.136185 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.136236 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7626ad91-9f29-4dae-969a-e23d420319ac-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.136269 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-config-data-default\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.136305 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bn8cl\" (UniqueName: \"kubernetes.io/projected/7626ad91-9f29-4dae-969a-e23d420319ac-kube-api-access-bn8cl\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.136363 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c29c84cd-bbfe-4a6b-97c3-4a9318a9b58f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c29c84cd-bbfe-4a6b-97c3-4a9318a9b58f\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.136389 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-kolla-config\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.136406 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7626ad91-9f29-4dae-969a-e23d420319ac-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.136443 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7626ad91-9f29-4dae-969a-e23d420319ac-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.137737 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7626ad91-9f29-4dae-969a-e23d420319ac-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.138234 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.138517 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-kolla-config\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.139616 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7626ad91-9f29-4dae-969a-e23d420319ac-config-data-default\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.149627 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7626ad91-9f29-4dae-969a-e23d420319ac-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.151283 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.151337 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c29c84cd-bbfe-4a6b-97c3-4a9318a9b58f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c29c84cd-bbfe-4a6b-97c3-4a9318a9b58f\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/54b7e6afd36e15f28bc58e905d7a15d7330f48a5afa2b3f9810c736f0924e8c1/globalmount\"" pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.165183 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7626ad91-9f29-4dae-969a-e23d420319ac-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.177029 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bn8cl\" (UniqueName: \"kubernetes.io/projected/7626ad91-9f29-4dae-969a-e23d420319ac-kube-api-access-bn8cl\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.209655 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c29c84cd-bbfe-4a6b-97c3-4a9318a9b58f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c29c84cd-bbfe-4a6b-97c3-4a9318a9b58f\") pod \"openstack-galera-0\" (UID: \"7626ad91-9f29-4dae-969a-e23d420319ac\") " pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.263036 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.721917 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:42.722051 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b5p78"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.069310 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.073764 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.081947 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-c9bz6"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.082262 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.082406 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.083177 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.087475 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.170190 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnqsw\" (UniqueName: \"kubernetes.io/projected/eca1d592-3310-47ed-a815-8f32bc974d9b-kube-api-access-tnqsw\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.170246 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.170284 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca1d592-3310-47ed-a815-8f32bc974d9b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.170424 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/eca1d592-3310-47ed-a815-8f32bc974d9b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.170476 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/eca1d592-3310-47ed-a815-8f32bc974d9b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.170532 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4f215b85-0c68-4b49-9bbb-370f543c5947\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4f215b85-0c68-4b49-9bbb-370f543c5947\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.170604 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.170695 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.273772 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca1d592-3310-47ed-a815-8f32bc974d9b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.283353 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/eca1d592-3310-47ed-a815-8f32bc974d9b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.283542 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/eca1d592-3310-47ed-a815-8f32bc974d9b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.283611 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4f215b85-0c68-4b49-9bbb-370f543c5947\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4f215b85-0c68-4b49-9bbb-370f543c5947\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.283740 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.283983 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.284070 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnqsw\" (UniqueName: \"kubernetes.io/projected/eca1d592-3310-47ed-a815-8f32bc974d9b-kube-api-access-tnqsw\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.284139 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.285792 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.286771 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca1d592-3310-47ed-a815-8f32bc974d9b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.273972 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.289309 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.290561 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/eca1d592-3310-47ed-a815-8f32bc974d9b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.299928 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.301879 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.302120 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.302411 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-jhrv5"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.302692 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/eca1d592-3310-47ed-a815-8f32bc974d9b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.304732 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/eca1d592-3310-47ed-a815-8f32bc974d9b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.309261 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.309298 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4f215b85-0c68-4b49-9bbb-370f543c5947\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4f215b85-0c68-4b49-9bbb-370f543c5947\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/264ac7486027c3a99a74b7ddd8008bff100d9262354a6c6907001146870e3f0f/globalmount\"" pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.316017 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.332086 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnqsw\" (UniqueName: \"kubernetes.io/projected/eca1d592-3310-47ed-a815-8f32bc974d9b-kube-api-access-tnqsw\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.386655 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e4ae9bbe-9854-4320-9415-2a894eda782e-config-data\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.386793 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bdj2\" (UniqueName: \"kubernetes.io/projected/e4ae9bbe-9854-4320-9415-2a894eda782e-kube-api-access-7bdj2\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.386840 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4ae9bbe-9854-4320-9415-2a894eda782e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.386964 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e4ae9bbe-9854-4320-9415-2a894eda782e-kolla-config\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.387016 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4ae9bbe-9854-4320-9415-2a894eda782e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.435111 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4f215b85-0c68-4b49-9bbb-370f543c5947\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4f215b85-0c68-4b49-9bbb-370f543c5947\") pod \"openstack-cell1-galera-0\" (UID: \"eca1d592-3310-47ed-a815-8f32bc974d9b\") " pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.494338 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bdj2\" (UniqueName: \"kubernetes.io/projected/e4ae9bbe-9854-4320-9415-2a894eda782e-kube-api-access-7bdj2\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.494393 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4ae9bbe-9854-4320-9415-2a894eda782e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.494507 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e4ae9bbe-9854-4320-9415-2a894eda782e-kolla-config\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.494542 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4ae9bbe-9854-4320-9415-2a894eda782e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.494700 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e4ae9bbe-9854-4320-9415-2a894eda782e-config-data\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.496128 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e4ae9bbe-9854-4320-9415-2a894eda782e-config-data\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.497017 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e4ae9bbe-9854-4320-9415-2a894eda782e-kolla-config\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.515374 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4ae9bbe-9854-4320-9415-2a894eda782e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.516822 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4ae9bbe-9854-4320-9415-2a894eda782e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.521829 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bdj2\" (UniqueName: \"kubernetes.io/projected/e4ae9bbe-9854-4320-9415-2a894eda782e-kube-api-access-7bdj2\") pod \"memcached-0\" (UID: \"e4ae9bbe-9854-4320-9415-2a894eda782e\") " pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.721742 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.753244 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 27 12:48:44 crc kubenswrapper[4900]: I0127 12:48:43.786241 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5p78" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" probeResult="failure" output=<
Jan 27 12:48:44 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 12:48:44 crc kubenswrapper[4900]: >
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.167438 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"]
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.218236 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.299176 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.301171 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.306213 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-rr6lw"
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.347462 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.447564 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m54v6\" (UniqueName: \"kubernetes.io/projected/1eab4b44-ee86-4d03-99dc-ca014f5c7141-kube-api-access-m54v6\") pod \"kube-state-metrics-0\" (UID: \"1eab4b44-ee86-4d03-99dc-ca014f5c7141\") " pod="openstack/kube-state-metrics-0"
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.551363 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m54v6\" (UniqueName: \"kubernetes.io/projected/1eab4b44-ee86-4d03-99dc-ca014f5c7141-kube-api-access-m54v6\") pod \"kube-state-metrics-0\" (UID: \"1eab4b44-ee86-4d03-99dc-ca014f5c7141\") " pod="openstack/kube-state-metrics-0"
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.554561 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.588177 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.603455 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m54v6\" (UniqueName: \"kubernetes.io/projected/1eab4b44-ee86-4d03-99dc-ca014f5c7141-kube-api-access-m54v6\") pod \"kube-state-metrics-0\" (UID: \"1eab4b44-ee86-4d03-99dc-ca014f5c7141\") " pod="openstack/kube-state-metrics-0"
Jan 27 12:48:45 crc kubenswrapper[4900]: I0127 12:48:45.648577 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.059661 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"]
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.063632 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.066898 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-zx497"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.071359 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.077707 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"]
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.161984 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-r84zs\" (UID: \"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.162124 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6v4h\" (UniqueName: \"kubernetes.io/projected/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-kube-api-access-p6v4h\") pod \"observability-ui-dashboards-66cbf594b5-r84zs\" (UID: \"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.266731 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-r84zs\" (UID: \"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.266947 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6v4h\" (UniqueName: \"kubernetes.io/projected/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-kube-api-access-p6v4h\") pod \"observability-ui-dashboards-66cbf594b5-r84zs\" (UID: \"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"
Jan 27 12:48:46 crc kubenswrapper[4900]: E0127 12:48:46.267650 4900 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found
Jan 27 12:48:46 crc kubenswrapper[4900]: E0127 12:48:46.267759 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-serving-cert podName:d6f43148-e0ec-452e-b55f-a6bf0c4d5b37 nodeName:}" failed. No retries permitted until 2026-01-27 12:48:46.767715632 +0000 UTC m=+1354.004743842 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-serving-cert") pod "observability-ui-dashboards-66cbf594b5-r84zs" (UID: "d6f43148-e0ec-452e-b55f-a6bf0c4d5b37") : secret "observability-ui-dashboards" not found
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.339643 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6v4h\" (UniqueName: \"kubernetes.io/projected/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-kube-api-access-p6v4h\") pod \"observability-ui-dashboards-66cbf594b5-r84zs\" (UID: \"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.560753 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7f9b6cf6cc-5nhbt"]
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.576268 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.625639 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7f9b6cf6cc-5nhbt"]
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.679229 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-service-ca\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.679360 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq4fr\" (UniqueName: \"kubernetes.io/projected/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-kube-api-access-cq4fr\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.679482 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-oauth-config\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.679529 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-config\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.679647 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-serving-cert\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.679677 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-trusted-ca-bundle\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.679746 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-oauth-serving-cert\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.717084 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.720434 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.730263 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.730743 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.731234 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.731533 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.735123 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-ljsg8"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.739194 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.743575 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.743887 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.791147 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-oauth-serving-cert\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.791223 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-service-ca\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.791262 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2537092f-2211-4329-afe3-1e15bdd14256-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.792942 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-oauth-serving-cert\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.793561 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-service-ca\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.798313 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812246 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-r84zs\" (UID: \"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812347 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812393 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812444 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq4fr\" (UniqueName: \"kubernetes.io/projected/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-kube-api-access-cq4fr\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812564 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812601 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:48:46 crc 
kubenswrapper[4900]: I0127 12:48:46.812637 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812688 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-oauth-config\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812746 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-config\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812797 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-config\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.812897 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt8pz\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-kube-api-access-gt8pz\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.813008 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.813106 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-serving-cert\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.813145 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-trusted-ca-bundle\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.813175 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.819402 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-config\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.819865 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-trusted-ca-bundle\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.824967 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6f43148-e0ec-452e-b55f-a6bf0c4d5b37-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-r84zs\" (UID: \"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.839279 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-serving-cert\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.839707 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-console-oauth-config\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.858198 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq4fr\" (UniqueName: \"kubernetes.io/projected/57b93bd3-2b4f-45f5-9691-4b3f553c1c13-kube-api-access-cq4fr\") pod \"console-7f9b6cf6cc-5nhbt\" (UID: \"57b93bd3-2b4f-45f5-9691-4b3f553c1c13\") " pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917356 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917394 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917417 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917452 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-config\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917498 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt8pz\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-kube-api-access-gt8pz\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917538 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917583 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917645 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2537092f-2211-4329-afe3-1e15bdd14256-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.917686 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.919030 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.923874 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.925658 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.926710 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.927163 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.936017 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.936694 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-config\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.939631 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.940424 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2537092f-2211-4329-afe3-1e15bdd14256-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.940533 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:46 crc kubenswrapper[4900]: I0127 12:48:46.962857 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt8pz\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-kube-api-access-gt8pz\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.034511 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
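
[Editor's note] The serving-cert failure at 12:48:46.267759 above is a transient race: secret "observability-ui-dashboards" was not yet visible to the kubelet's mounter when MountVolume.SetUp first ran, so the operation was parked with "No retries permitted until ..." and re-queued 500ms later (durationBeforeRetry); the retry succeeds at 12:48:46.824967. A minimal Go sketch of that defer-and-double retry pattern, assuming an initial 500ms delay that doubles per failure up to a cap — the names and limits are illustrative, not kubelet's actual nestedpendingoperations API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// backoffRetry re-runs op after a failure, deferring each retry the way the
// kubelet's volume manager does above: no retries permitted until the current
// delay elapses, starting at durationBeforeRetry (500ms in the log) and
// doubling per failure up to a cap. Illustrative only.
func backoffRetry(op func() error, initial, maxDelay time.Duration, attempts int) error {
	delay := initial
	for i := 1; i <= attempts; i++ {
		err := op()
		if err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v; no retries permitted for %v\n", i, err, delay)
		time.Sleep(delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	return errors.New("retries exhausted")
}

func main() {
	lookups := 0
	// Simulate a secret that only becomes visible on the third lookup, like
	// "observability-ui-dashboards" succeeding on the retry in the log.
	err := backoffRetry(func() error {
		lookups++
		if lookups < 3 {
			return errors.New(`secret "observability-ui-dashboards" not found`)
		}
		return nil
	}, 500*time.Millisecond, 2*time.Minute, 5)
	fmt.Println("final result:", err)
}

Running this prints two "not found" failures with growing delays and then succeeds, matching the failed-then-succeeded sequence for the serving-cert volume above. Note that the pod's other volume (kube-api-access-p6v4h) mounted independently at 12:48:46.339643: the backoff is per volume operation, not per pod.
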
Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.034573 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9c38213e9dd6f58dfaaef0d7677bd37505bee9fcde8c91084cb29405ba765d6a/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.043776 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs" Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.152688 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"prometheus-metric-storage-0\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") " pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.366156 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.991024 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-k9z2g"] Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.993616 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.998973 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-f5dth" Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.999210 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 27 12:48:47 crc kubenswrapper[4900]: I0127 12:48:47.999557 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.011497 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-t4b4c"] Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.014438 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.032274 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-t4b4c"] Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.043087 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k9z2g"] Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091156 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-run\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091228 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-ovn-controller-tls-certs\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091276 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-run-ovn\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091297 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-combined-ca-bundle\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091321 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-scripts\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091354 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/029e8969-d08d-4909-9409-33f888c56c8c-scripts\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091385 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcvg4\" (UniqueName: \"kubernetes.io/projected/029e8969-d08d-4909-9409-33f888c56c8c-kube-api-access-gcvg4\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091411 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-run\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc 
kubenswrapper[4900]: I0127 12:48:48.091462 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-log-ovn\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091506 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxpg4\" (UniqueName: \"kubernetes.io/projected/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-kube-api-access-zxpg4\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091532 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-lib\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091593 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-log\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.091612 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-etc-ovs\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.193661 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxpg4\" (UniqueName: \"kubernetes.io/projected/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-kube-api-access-zxpg4\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.193739 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-lib\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.193834 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-log\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.193861 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-etc-ovs\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.193933 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-run\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.193991 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-ovn-controller-tls-certs\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.194038 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-run-ovn\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.194078 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-combined-ca-bundle\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.194114 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-scripts\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.194157 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/029e8969-d08d-4909-9409-33f888c56c8c-scripts\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.194187 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcvg4\" (UniqueName: \"kubernetes.io/projected/029e8969-d08d-4909-9409-33f888c56c8c-kube-api-access-gcvg4\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.194214 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-run\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.194281 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-log-ovn\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.194505 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-lib\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " 
pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.195194 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-log\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.195361 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-etc-ovs\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.195371 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-log-ovn\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.195454 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-run\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.195558 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-var-run-ovn\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.195664 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/029e8969-d08d-4909-9409-33f888c56c8c-var-run\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.196687 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-scripts\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.201643 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/029e8969-d08d-4909-9409-33f888c56c8c-scripts\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.202297 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-combined-ca-bundle\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.214357 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxpg4\" (UniqueName: \"kubernetes.io/projected/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-kube-api-access-zxpg4\") pod 
\"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.214497 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75-ovn-controller-tls-certs\") pod \"ovn-controller-k9z2g\" (UID: \"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75\") " pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.218608 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcvg4\" (UniqueName: \"kubernetes.io/projected/029e8969-d08d-4909-9409-33f888c56c8c-kube-api-access-gcvg4\") pod \"ovn-controller-ovs-t4b4c\" (UID: \"029e8969-d08d-4909-9409-33f888c56c8c\") " pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.329657 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k9z2g" Jan 27 12:48:48 crc kubenswrapper[4900]: I0127 12:48:48.342337 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.418362 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.423256 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.427377 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.427616 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.427742 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.428003 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-tm2lb" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.428276 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.436753 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.527546 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c76c04d4-a881-4504-a00f-3b227187edfa-config\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.527606 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwjzb\" (UniqueName: \"kubernetes.io/projected/c76c04d4-a881-4504-a00f-3b227187edfa-kube-api-access-cwjzb\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.527649 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/c76c04d4-a881-4504-a00f-3b227187edfa-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.527673 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.527697 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.528025 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8ae2267b-5a49-4f0b-90c3-8da79c163615\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8ae2267b-5a49-4f0b-90c3-8da79c163615\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.528333 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.528387 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c76c04d4-a881-4504-a00f-3b227187edfa-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.630870 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c76c04d4-a881-4504-a00f-3b227187edfa-config\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.630929 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwjzb\" (UniqueName: \"kubernetes.io/projected/c76c04d4-a881-4504-a00f-3b227187edfa-kube-api-access-cwjzb\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.630982 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c76c04d4-a881-4504-a00f-3b227187edfa-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.631015 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: 
\"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.631034 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.631118 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8ae2267b-5a49-4f0b-90c3-8da79c163615\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8ae2267b-5a49-4f0b-90c3-8da79c163615\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.631161 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.631186 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c76c04d4-a881-4504-a00f-3b227187edfa-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.631893 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c76c04d4-a881-4504-a00f-3b227187edfa-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.633389 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c76c04d4-a881-4504-a00f-3b227187edfa-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.634337 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.634369 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8ae2267b-5a49-4f0b-90c3-8da79c163615\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8ae2267b-5a49-4f0b-90c3-8da79c163615\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/67739c716ac48f7b0be31f5b1c8665011326c36d77bd8065f249815b9a68afd6/globalmount\"" pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.638723 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.638707 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c76c04d4-a881-4504-a00f-3b227187edfa-config\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.639915 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.644151 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c76c04d4-a881-4504-a00f-3b227187edfa-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.653625 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwjzb\" (UniqueName: \"kubernetes.io/projected/c76c04d4-a881-4504-a00f-3b227187edfa-kube-api-access-cwjzb\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.671406 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8ae2267b-5a49-4f0b-90c3-8da79c163615\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8ae2267b-5a49-4f0b-90c3-8da79c163615\") pod \"ovsdbserver-nb-0\" (UID: \"c76c04d4-a881-4504-a00f-3b227187edfa\") " pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: I0127 12:48:49.752127 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 27 12:48:49 crc kubenswrapper[4900]: W0127 12:48:49.925601 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb703ff05_40e9_4e7e_bbf4_463a6e8a9ed5.slice/crio-6fd592117ab6e67e028bf454492a86764928942a9bec21c9c70fc056bf93fc9b WatchSource:0}: Error finding container 6fd592117ab6e67e028bf454492a86764928942a9bec21c9c70fc056bf93fc9b: Status 404 returned error can't find the container with id 6fd592117ab6e67e028bf454492a86764928942a9bec21c9c70fc056bf93fc9b Jan 27 12:48:49 crc kubenswrapper[4900]: W0127 12:48:49.928836 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4594c71_599f_4576_bf95_303da1436ca4.slice/crio-f07de842d7c649658cb9dc9947031c513ab246925a2209ecd3c9ef11ecc4cfab WatchSource:0}: Error finding container f07de842d7c649658cb9dc9947031c513ab246925a2209ecd3c9ef11ecc4cfab: Status 404 returned error can't find the container with id f07de842d7c649658cb9dc9947031c513ab246925a2209ecd3c9ef11ecc4cfab Jan 27 12:48:49 crc kubenswrapper[4900]: W0127 12:48:49.935726 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9cd313ac_a7d9_4d00_9fd3_6b7950a928e4.slice/crio-dd8ee40b0e543e8c143b05564a96072974a33a652a73228c7ab18a504b6f1ff4 WatchSource:0}: Error finding container dd8ee40b0e543e8c143b05564a96072974a33a652a73228c7ab18a504b6f1ff4: Status 404 returned error can't find the container with id dd8ee40b0e543e8c143b05564a96072974a33a652a73228c7ab18a504b6f1ff4 Jan 27 12:48:50 crc kubenswrapper[4900]: I0127 12:48:50.858991 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ca415683-2d53-4bdc-b9f7-c98610a65cc3","Type":"ContainerStarted","Data":"62c7fd063d406e834bf1a8b45c1b1433520dbfe57cabcad90a0bac14c0612e6e"} Jan 27 12:48:50 crc kubenswrapper[4900]: I0127 12:48:50.861764 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5","Type":"ContainerStarted","Data":"6fd592117ab6e67e028bf454492a86764928942a9bec21c9c70fc056bf93fc9b"} Jan 27 12:48:50 crc kubenswrapper[4900]: I0127 12:48:50.863173 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c4594c71-599f-4576-bf95-303da1436ca4","Type":"ContainerStarted","Data":"f07de842d7c649658cb9dc9947031c513ab246925a2209ecd3c9ef11ecc4cfab"} Jan 27 12:48:50 crc kubenswrapper[4900]: I0127 12:48:50.864512 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4","Type":"ContainerStarted","Data":"dd8ee40b0e543e8c143b05564a96072974a33a652a73228c7ab18a504b6f1ff4"} Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.206475 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.209028 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.212101 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.212398 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.212410 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-2phs9" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.214251 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.219303 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.292096 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.292215 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6217f66e-2295-46d6-878c-cc9457712a8c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.292257 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.292326 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6217f66e-2295-46d6-878c-cc9457712a8c-config\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.292645 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwb4d\" (UniqueName: \"kubernetes.io/projected/6217f66e-2295-46d6-878c-cc9457712a8c-kube-api-access-wwb4d\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.292919 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.292998 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.293161 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6217f66e-2295-46d6-878c-cc9457712a8c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.372813 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.372911 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.395408 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.395487 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6217f66e-2295-46d6-878c-cc9457712a8c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.395513 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.395598 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6217f66e-2295-46d6-878c-cc9457712a8c-config\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.396207 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6217f66e-2295-46d6-878c-cc9457712a8c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.396601 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwb4d\" (UniqueName: \"kubernetes.io/projected/6217f66e-2295-46d6-878c-cc9457712a8c-kube-api-access-wwb4d\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc 
kubenswrapper[4900]: I0127 12:48:52.396664 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.396718 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.396801 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6217f66e-2295-46d6-878c-cc9457712a8c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.396997 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6217f66e-2295-46d6-878c-cc9457712a8c-config\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.397960 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6217f66e-2295-46d6-878c-cc9457712a8c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.400528 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
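The reconciler entries above trace the CSI attach/mount sequence for the ovsdbserver-sb-0 PVC: VerifyControllerAttachedVolume for each volume, then MountVolume, with MountDevice skipped because the kubevirt.io.hostpath-provisioner driver does not advertise the STAGE_UNSTAGE_VOLUME capability. A minimal Go sketch of how the global-mount ("staging") path reported in the next entry could be derived — assuming, as in recent kubelets, that the per-volume directory name is a SHA-256 hash of the CSI volume handle; the constant, helper name, and hash input below are illustrative assumptions, not taken from kubelet source or confirmed by this log:

// Sketch: reconstructing the CSI global-mount ("staging") path shape that the
// MountDevice entry below reports for pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a.
// ASSUMPTION: the per-volume directory is a SHA-256 hash of the volume's ID.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"path/filepath"
)

const kubeletCSIPluginsDir = "/var/lib/kubelet/plugins/kubernetes.io/csi"

// stagingPath mirrors the layout seen in the MountDevice log entry:
// <plugins dir>/<driver name>/<hashed volume ID>/globalmount
func stagingPath(driver, volumeID string) string {
	hash := sha256.Sum256([]byte(volumeID)) // hashed dir name (assumed input)
	return filepath.Join(kubeletCSIPluginsDir, driver,
		hex.EncodeToString(hash[:]), "globalmount")
}

func main() {
	fmt.Println(stagingPath("kubevirt.io.hostpath-provisioner",
		"pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a"))
}

Because MountDevice is a no-op for drivers without STAGE_UNSTAGE_VOLUME, the "MountVolume.MountDevice succeeded" record at 12:48:52.400577 below appears immediately; the volume only becomes usable by the pod after the later "MountVolume.SetUp succeeded" entry for the PVC at 12:48:52.437112.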
Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.400577 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bdd8261026b5845ee341a69c8b8eb97a3aff1baf1958a8e3b647f076a7dcfaa6/globalmount\"" pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.401133 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.401367 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.410315 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6217f66e-2295-46d6-878c-cc9457712a8c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.412899 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwb4d\" (UniqueName: \"kubernetes.io/projected/6217f66e-2295-46d6-878c-cc9457712a8c-kube-api-access-wwb4d\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.437112 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30e5fa33-404d-406d-bdc9-f526fbfdec9a\") pod \"ovsdbserver-sb-0\" (UID: \"6217f66e-2295-46d6-878c-cc9457712a8c\") " pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:52 crc kubenswrapper[4900]: I0127 12:48:52.542512 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 27 12:48:53 crc kubenswrapper[4900]: I0127 12:48:53.763924 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5p78" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" probeResult="failure" output=< Jan 27 12:48:53 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:48:53 crc kubenswrapper[4900]: > Jan 27 12:48:56 crc kubenswrapper[4900]: I0127 12:48:56.406655 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 27 12:48:59 crc kubenswrapper[4900]: I0127 12:48:59.907392 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 27 12:49:03 crc kubenswrapper[4900]: I0127 12:49:03.784048 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5p78" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" probeResult="failure" output=< Jan 27 12:49:03 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:49:03 crc kubenswrapper[4900]: > Jan 27 12:49:03 crc kubenswrapper[4900]: W0127 12:49:03.937663 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7626ad91_9f29_4dae_969a_e23d420319ac.slice/crio-cc16f91b296ba7528ccb1483eb06cccb300af98ebed6fbc681a732e3d6145d19 WatchSource:0}: Error finding container cc16f91b296ba7528ccb1483eb06cccb300af98ebed6fbc681a732e3d6145d19: Status 404 returned error can't find the container with id cc16f91b296ba7528ccb1483eb06cccb300af98ebed6fbc681a732e3d6145d19 Jan 27 12:49:03 crc kubenswrapper[4900]: W0127 12:49:03.977963 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc76c04d4_a881_4504_a00f_3b227187edfa.slice/crio-9f67b2308895642d8a2c54674772f6d159a6eb1ecfc29905ac196958b9e7aa9c WatchSource:0}: Error finding container 9f67b2308895642d8a2c54674772f6d159a6eb1ecfc29905ac196958b9e7aa9c: Status 404 returned error can't find the container with id 9f67b2308895642d8a2c54674772f6d159a6eb1ecfc29905ac196958b9e7aa9c Jan 27 12:49:04 crc kubenswrapper[4900]: I0127 12:49:04.338049 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7626ad91-9f29-4dae-969a-e23d420319ac","Type":"ContainerStarted","Data":"cc16f91b296ba7528ccb1483eb06cccb300af98ebed6fbc681a732e3d6145d19"} Jan 27 12:49:04 crc kubenswrapper[4900]: I0127 12:49:04.339740 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c76c04d4-a881-4504-a00f-3b227187edfa","Type":"ContainerStarted","Data":"9f67b2308895642d8a2c54674772f6d159a6eb1ecfc29905ac196958b9e7aa9c"} Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.698313 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.699019 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug 
--bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ghx8b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-f9tqz_openstack(4d02f684-ed10-4d14-8a81-fea1453f4b2a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.700358 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.756413 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.756847 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-442jh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-gf2v2_openstack(2f1bb41c-4126-4f2a-95a0-917dec713e9d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.758362 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2" podUID="2f1bb41c-4126-4f2a-95a0-917dec713e9d" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.812840 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.814939 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jjm9f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-wh4rj_openstack(d8dcc12a-564d-412b-b443-70036eab30a7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.823772 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.995235 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 27 12:49:05 crc kubenswrapper[4900]: E0127 12:49:05.996529 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xflwh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-64kbp_openstack(ea7300b4-1cbb-4f83-8ac3-668d188d0e8b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:49:06 crc kubenswrapper[4900]: E0127 12:49:06.001865 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp" podUID="ea7300b4-1cbb-4f83-8ac3-668d188d0e8b" Jan 27 12:49:06 crc kubenswrapper[4900]: E0127 12:49:06.372292 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" Jan 27 12:49:06 crc kubenswrapper[4900]: E0127 12:49:06.372464 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.067574 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k9z2g"] Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.081788 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7f9b6cf6cc-5nhbt"] Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.094885 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.102229 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 27 12:49:07 crc 
kubenswrapper[4900]: I0127 12:49:07.258563 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-t4b4c"] Jan 27 12:49:07 crc kubenswrapper[4900]: W0127 12:49:07.314490 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1eab4b44_ee86_4d03_99dc_ca014f5c7141.slice/crio-44df8d0b6f39308f51b152745144aa85a2f7bb8a43282debde0a9129f004755e WatchSource:0}: Error finding container 44df8d0b6f39308f51b152745144aa85a2f7bb8a43282debde0a9129f004755e: Status 404 returned error can't find the container with id 44df8d0b6f39308f51b152745144aa85a2f7bb8a43282debde0a9129f004755e Jan 27 12:49:07 crc kubenswrapper[4900]: W0127 12:49:07.328447 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2537092f_2211_4329_afe3_1e15bdd14256.slice/crio-83b8c607df95c25e894c821482a1db35c4151bbefef56df832703ffedc1d7ee4 WatchSource:0}: Error finding container 83b8c607df95c25e894c821482a1db35c4151bbefef56df832703ffedc1d7ee4: Status 404 returned error can't find the container with id 83b8c607df95c25e894c821482a1db35c4151bbefef56df832703ffedc1d7ee4 Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.378630 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f9b6cf6cc-5nhbt" event={"ID":"57b93bd3-2b4f-45f5-9691-4b3f553c1c13","Type":"ContainerStarted","Data":"fb9ff88546478716bdda4aff7073c8d61446299ed5702d9eddb0897940983d0e"} Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.380888 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerStarted","Data":"83b8c607df95c25e894c821482a1db35c4151bbefef56df832703ffedc1d7ee4"} Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.382458 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t4b4c" event={"ID":"029e8969-d08d-4909-9409-33f888c56c8c","Type":"ContainerStarted","Data":"7803fe13bbc42c45086310f4107a8950e355a6bec5a60ac57c6111e6076d4ae1"} Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.384199 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k9z2g" event={"ID":"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75","Type":"ContainerStarted","Data":"5a08df5a33e8984e9a6f2316ffdf17c1aec708da88df0d68d2bd3356f58e2957"} Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.385591 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1eab4b44-ee86-4d03-99dc-ca014f5c7141","Type":"ContainerStarted","Data":"44df8d0b6f39308f51b152745144aa85a2f7bb8a43282debde0a9129f004755e"} Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.478236 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.493897 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.511925 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs"] Jan 27 12:49:07 crc kubenswrapper[4900]: I0127 12:49:07.559615 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 27 12:49:07 crc kubenswrapper[4900]: W0127 12:49:07.604507 4900 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6f43148_e0ec_452e_b55f_a6bf0c4d5b37.slice/crio-d61c41fb76863c04abb698cba131f827814a2b22a6e94946c4d7d820a8a9f7d4 WatchSource:0}: Error finding container d61c41fb76863c04abb698cba131f827814a2b22a6e94946c4d7d820a8a9f7d4: Status 404 returned error can't find the container with id d61c41fb76863c04abb698cba131f827814a2b22a6e94946c4d7d820a8a9f7d4 Jan 27 12:49:07 crc kubenswrapper[4900]: W0127 12:49:07.607540 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6217f66e_2295_46d6_878c_cc9457712a8c.slice/crio-278306afdc5a8de2aad74810cf5b148e7e9f47990ec67185c39c8ab1a44f1b1a WatchSource:0}: Error finding container 278306afdc5a8de2aad74810cf5b148e7e9f47990ec67185c39c8ab1a44f1b1a: Status 404 returned error can't find the container with id 278306afdc5a8de2aad74810cf5b148e7e9f47990ec67185c39c8ab1a44f1b1a Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.329139 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.336449 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.408330 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"6217f66e-2295-46d6-878c-cc9457712a8c","Type":"ContainerStarted","Data":"278306afdc5a8de2aad74810cf5b148e7e9f47990ec67185c39c8ab1a44f1b1a"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.410557 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"eca1d592-3310-47ed-a815-8f32bc974d9b","Type":"ContainerStarted","Data":"d104854bb3289895a14ec6a4a2dabcd0d412e5f63fdc07d08e7c0c731c88314c"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.416211 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c4594c71-599f-4576-bf95-303da1436ca4","Type":"ContainerStarted","Data":"67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.418970 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4","Type":"ContainerStarted","Data":"6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.426072 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2" event={"ID":"2f1bb41c-4126-4f2a-95a0-917dec713e9d","Type":"ContainerDied","Data":"13ad07709ebbc07eb19329e4f3a48cc897eee033a73ea2de261dbe94cd6a1eca"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.426177 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-gf2v2" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.431391 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5","Type":"ContainerStarted","Data":"6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.436511 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp" event={"ID":"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b","Type":"ContainerDied","Data":"93af817d2fd516f53e13e05408003794cd5ffda9b3617bb31a8492f1c780a661"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.436519 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-64kbp" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.439511 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ca415683-2d53-4bdc-b9f7-c98610a65cc3","Type":"ContainerStarted","Data":"4a96193197e852909fdf2ce6bdab6d9377bee4d8f7c787ddc670fdc082daea92"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.452867 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs" event={"ID":"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37","Type":"ContainerStarted","Data":"d61c41fb76863c04abb698cba131f827814a2b22a6e94946c4d7d820a8a9f7d4"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.456599 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e4ae9bbe-9854-4320-9415-2a894eda782e","Type":"ContainerStarted","Data":"f9959c78d45c45f22fee59633d7c01de1f1fd562b27f7a5f1eeddcd9ebb020a8"} Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.520670 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-dns-svc\") pod \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.520767 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-config\") pod \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\" (UID: \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.520989 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xflwh\" (UniqueName: \"kubernetes.io/projected/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-kube-api-access-xflwh\") pod \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\" (UID: \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\") " Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.521093 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-config\") pod \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\" (UID: \"ea7300b4-1cbb-4f83-8ac3-668d188d0e8b\") " Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.521173 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-442jh\" (UniqueName: \"kubernetes.io/projected/2f1bb41c-4126-4f2a-95a0-917dec713e9d-kube-api-access-442jh\") pod \"2f1bb41c-4126-4f2a-95a0-917dec713e9d\" (UID: 
\"2f1bb41c-4126-4f2a-95a0-917dec713e9d\") " Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.521403 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2f1bb41c-4126-4f2a-95a0-917dec713e9d" (UID: "2f1bb41c-4126-4f2a-95a0-917dec713e9d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.521827 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.522385 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-config" (OuterVolumeSpecName: "config") pod "2f1bb41c-4126-4f2a-95a0-917dec713e9d" (UID: "2f1bb41c-4126-4f2a-95a0-917dec713e9d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.522915 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-config" (OuterVolumeSpecName: "config") pod "ea7300b4-1cbb-4f83-8ac3-668d188d0e8b" (UID: "ea7300b4-1cbb-4f83-8ac3-668d188d0e8b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.530719 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f1bb41c-4126-4f2a-95a0-917dec713e9d-kube-api-access-442jh" (OuterVolumeSpecName: "kube-api-access-442jh") pod "2f1bb41c-4126-4f2a-95a0-917dec713e9d" (UID: "2f1bb41c-4126-4f2a-95a0-917dec713e9d"). InnerVolumeSpecName "kube-api-access-442jh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.537257 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-kube-api-access-xflwh" (OuterVolumeSpecName: "kube-api-access-xflwh") pod "ea7300b4-1cbb-4f83-8ac3-668d188d0e8b" (UID: "ea7300b4-1cbb-4f83-8ac3-668d188d0e8b"). InnerVolumeSpecName "kube-api-access-xflwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.624695 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xflwh\" (UniqueName: \"kubernetes.io/projected/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-kube-api-access-xflwh\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.624743 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.624755 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-442jh\" (UniqueName: \"kubernetes.io/projected/2f1bb41c-4126-4f2a-95a0-917dec713e9d-kube-api-access-442jh\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.624765 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f1bb41c-4126-4f2a-95a0-917dec713e9d-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.821809 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gf2v2"] Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.834139 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-gf2v2"] Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.857605 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-64kbp"] Jan 27 12:49:08 crc kubenswrapper[4900]: I0127 12:49:08.866409 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-64kbp"] Jan 27 12:49:09 crc kubenswrapper[4900]: I0127 12:49:09.497845 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f9b6cf6cc-5nhbt" event={"ID":"57b93bd3-2b4f-45f5-9691-4b3f553c1c13","Type":"ContainerStarted","Data":"1486a83c328515a3c06276ab1bba948642e2a66f6d7ea2d81221a1ccc5b752bd"} Jan 27 12:49:09 crc kubenswrapper[4900]: I0127 12:49:09.537328 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7f9b6cf6cc-5nhbt" podStartSLOduration=23.537284651 podStartE2EDuration="23.537284651s" podCreationTimestamp="2026-01-27 12:48:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:09.531184653 +0000 UTC m=+1376.768212863" watchObservedRunningTime="2026-01-27 12:49:09.537284651 +0000 UTC m=+1376.774312861" Jan 27 12:49:10 crc kubenswrapper[4900]: I0127 12:49:10.499459 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f1bb41c-4126-4f2a-95a0-917dec713e9d" path="/var/lib/kubelet/pods/2f1bb41c-4126-4f2a-95a0-917dec713e9d/volumes" Jan 27 12:49:10 crc kubenswrapper[4900]: I0127 12:49:10.500081 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea7300b4-1cbb-4f83-8ac3-668d188d0e8b" path="/var/lib/kubelet/pods/ea7300b4-1cbb-4f83-8ac3-668d188d0e8b/volumes" Jan 27 12:49:13 crc kubenswrapper[4900]: I0127 12:49:13.769952 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5p78" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" probeResult="failure" output=< Jan 27 12:49:13 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 
12:49:13 crc kubenswrapper[4900]: > Jan 27 12:49:16 crc kubenswrapper[4900]: I0127 12:49:16.940842 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:49:16 crc kubenswrapper[4900]: I0127 12:49:16.941509 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:49:16 crc kubenswrapper[4900]: I0127 12:49:16.945482 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:49:17 crc kubenswrapper[4900]: I0127 12:49:17.604204 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 12:49:17 crc kubenswrapper[4900]: I0127 12:49:17.676791 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5d4d5db777-x4428"] Jan 27 12:49:18 crc kubenswrapper[4900]: E0127 12:49:18.509686 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified" Jan 27 12:49:18 crc kubenswrapper[4900]: E0127 12:49:18.510025 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-nb,Image:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n556h565h587h595h5fdh669h587hd7h5bdhbbh78h554h697h7dh5f8h697h577hcch584h5c5h5d4h67chc9h68fh694hcdhd5h664h5bbh67fhb6h55bq,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cwjzb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof 
ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(c76c04d4-a881-4504-a00f-3b227187edfa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:49:22 crc kubenswrapper[4900]: I0127 12:49:22.372982 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:49:22 crc kubenswrapper[4900]: I0127 12:49:22.373659 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:49:22 crc kubenswrapper[4900]: I0127 12:49:22.373710 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 12:49:22 crc kubenswrapper[4900]: I0127 12:49:22.374850 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d3eb521560952eab8f11162cda8d03a25740b3b833254e8284177a101ff26343"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 12:49:22 crc kubenswrapper[4900]: I0127 12:49:22.374938 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://d3eb521560952eab8f11162cda8d03a25740b3b833254e8284177a101ff26343" gracePeriod=600 Jan 27 12:49:22 crc kubenswrapper[4900]: I0127 12:49:22.709732 4900 generic.go:334] "Generic (PLEG): 
container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="d3eb521560952eab8f11162cda8d03a25740b3b833254e8284177a101ff26343" exitCode=0 Jan 27 12:49:22 crc kubenswrapper[4900]: I0127 12:49:22.709786 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"d3eb521560952eab8f11162cda8d03a25740b3b833254e8284177a101ff26343"} Jan 27 12:49:22 crc kubenswrapper[4900]: I0127 12:49:22.710190 4900 scope.go:117] "RemoveContainer" containerID="311b2446296261b95a4a8935d2688a7bfd4e27781be2ac18543d5aed5bad7b0a" Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.721468 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876"} Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.724032 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e4ae9bbe-9854-4320-9415-2a894eda782e","Type":"ContainerStarted","Data":"99751ddeabb352057d638f7f90b9d9f2231cec7a2923f977e3430b766b2b96b4"} Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.724198 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.726363 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7626ad91-9f29-4dae-969a-e23d420319ac","Type":"ContainerStarted","Data":"bb4fe9c9382999e365d16e63f4daef8b6d615db47ad22e63db217db1a5e3b2f7"} Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.727754 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"6217f66e-2295-46d6-878c-cc9457712a8c","Type":"ContainerStarted","Data":"3354473eaadeb07f60047d85924e376a7e5d29e7523db8863ed28ca625d9e346"} Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.729077 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1eab4b44-ee86-4d03-99dc-ca014f5c7141","Type":"ContainerStarted","Data":"5ada64f44fa5b78970a0628bad9e8de2ccda0971814026d6d4d1a306611eedc5"} Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.729160 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.730494 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"eca1d592-3310-47ed-a815-8f32bc974d9b","Type":"ContainerStarted","Data":"f4a879f19775cac2970a7d89376884e19d39318c4633401bf93040dfe62be007"} Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.731534 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs" event={"ID":"d6f43148-e0ec-452e-b55f-a6bf0c4d5b37","Type":"ContainerStarted","Data":"de549d25f585bad6a14ef1fa44c6443d2fdafc09c50d30622ff17c5c60565595"} Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.798732 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=23.55892393 podStartE2EDuration="38.798702331s" podCreationTimestamp="2026-01-27 12:48:45 +0000 UTC" firstStartedPulling="2026-01-27 12:49:07.317669847 +0000 
UTC m=+1374.554698057" lastFinishedPulling="2026-01-27 12:49:22.557448258 +0000 UTC m=+1389.794476458" observedRunningTime="2026-01-27 12:49:23.78665381 +0000 UTC m=+1391.023682030" watchObservedRunningTime="2026-01-27 12:49:23.798702331 +0000 UTC m=+1391.035730541" Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.818285 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=29.310763673 podStartE2EDuration="40.818260592s" podCreationTimestamp="2026-01-27 12:48:43 +0000 UTC" firstStartedPulling="2026-01-27 12:49:07.496239363 +0000 UTC m=+1374.733267573" lastFinishedPulling="2026-01-27 12:49:19.003736282 +0000 UTC m=+1386.240764492" observedRunningTime="2026-01-27 12:49:23.810228707 +0000 UTC m=+1391.047256927" watchObservedRunningTime="2026-01-27 12:49:23.818260592 +0000 UTC m=+1391.055288792" Jan 27 12:49:23 crc kubenswrapper[4900]: I0127 12:49:23.838271 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-r84zs" podStartSLOduration=26.307575089 podStartE2EDuration="37.838244334s" podCreationTimestamp="2026-01-27 12:48:46 +0000 UTC" firstStartedPulling="2026-01-27 12:49:07.606665382 +0000 UTC m=+1374.843693592" lastFinishedPulling="2026-01-27 12:49:19.137334627 +0000 UTC m=+1386.374362837" observedRunningTime="2026-01-27 12:49:23.833130345 +0000 UTC m=+1391.070158575" watchObservedRunningTime="2026-01-27 12:49:23.838244334 +0000 UTC m=+1391.075272544" Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.305616 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5p78" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" probeResult="failure" output=< Jan 27 12:49:24 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:49:24 crc kubenswrapper[4900]: > Jan 27 12:49:24 crc kubenswrapper[4900]: E0127 12:49:24.629101 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="c76c04d4-a881-4504-a00f-3b227187edfa" Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.743988 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c76c04d4-a881-4504-a00f-3b227187edfa","Type":"ContainerStarted","Data":"fde5ac6cd07c52e8965f0498ef086d7b2ca2b2f780c7b7fe50f8139ae2067530"} Jan 27 12:49:24 crc kubenswrapper[4900]: E0127 12:49:24.748120 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="c76c04d4-a881-4504-a00f-3b227187edfa" Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.748873 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t4b4c" event={"ID":"029e8969-d08d-4909-9409-33f888c56c8c","Type":"ContainerStarted","Data":"4501bbb1d001a096893b8b28bf18962068ccd99c437fd50d529e8b5c3ecb1916"} Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.751408 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k9z2g" 
event={"ID":"c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75","Type":"ContainerStarted","Data":"12e0737d5c3a98f311977c09312214f588c4fab0defb6b295f496d39e1b9be4f"} Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.751852 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-k9z2g" Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.753812 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"6217f66e-2295-46d6-878c-cc9457712a8c","Type":"ContainerStarted","Data":"9c66fcf0fb05ed8b10b50c1499c1e2e88e1b4ce181c455a138791d470b670216"} Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.756857 4900 generic.go:334] "Generic (PLEG): container finished" podID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" containerID="b5e9c10b40cc33b89dd99e59e1db54083b63986c31770c97f91a392e10f311f2" exitCode=0 Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.756969 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" event={"ID":"4d02f684-ed10-4d14-8a81-fea1453f4b2a","Type":"ContainerDied","Data":"b5e9c10b40cc33b89dd99e59e1db54083b63986c31770c97f91a392e10f311f2"} Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.759246 4900 generic.go:334] "Generic (PLEG): container finished" podID="d8dcc12a-564d-412b-b443-70036eab30a7" containerID="6eac136f7d86675e5a72e914d0bcd57ef768fb9954ef16dd6c7058d88357ee49" exitCode=0 Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.759330 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" event={"ID":"d8dcc12a-564d-412b-b443-70036eab30a7","Type":"ContainerDied","Data":"6eac136f7d86675e5a72e914d0bcd57ef768fb9954ef16dd6c7058d88357ee49"} Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.828512 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=22.305233472 podStartE2EDuration="33.828487081s" podCreationTimestamp="2026-01-27 12:48:51 +0000 UTC" firstStartedPulling="2026-01-27 12:49:07.614072788 +0000 UTC m=+1374.851100998" lastFinishedPulling="2026-01-27 12:49:19.137326407 +0000 UTC m=+1386.374354607" observedRunningTime="2026-01-27 12:49:24.825398871 +0000 UTC m=+1392.062427081" watchObservedRunningTime="2026-01-27 12:49:24.828487081 +0000 UTC m=+1392.065515291" Jan 27 12:49:24 crc kubenswrapper[4900]: I0127 12:49:24.873683 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-k9z2g" podStartSLOduration=25.906832018 podStartE2EDuration="37.873665408s" podCreationTimestamp="2026-01-27 12:48:47 +0000 UTC" firstStartedPulling="2026-01-27 12:49:07.312737963 +0000 UTC m=+1374.549766173" lastFinishedPulling="2026-01-27 12:49:19.279571363 +0000 UTC m=+1386.516599563" observedRunningTime="2026-01-27 12:49:24.869722203 +0000 UTC m=+1392.106750413" watchObservedRunningTime="2026-01-27 12:49:24.873665408 +0000 UTC m=+1392.110693618" Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.542863 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.775452 4900 generic.go:334] "Generic (PLEG): container finished" podID="029e8969-d08d-4909-9409-33f888c56c8c" containerID="4501bbb1d001a096893b8b28bf18962068ccd99c437fd50d529e8b5c3ecb1916" exitCode=0 Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.775566 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-ovs-t4b4c" event={"ID":"029e8969-d08d-4909-9409-33f888c56c8c","Type":"ContainerDied","Data":"4501bbb1d001a096893b8b28bf18962068ccd99c437fd50d529e8b5c3ecb1916"} Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.778642 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" event={"ID":"4d02f684-ed10-4d14-8a81-fea1453f4b2a","Type":"ContainerStarted","Data":"ba08e6489fce6af9cde5b167a1012814965c96141fe01dcb7bbdb6a39ca0fd88"} Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.779501 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.784938 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" event={"ID":"d8dcc12a-564d-412b-b443-70036eab30a7","Type":"ContainerStarted","Data":"963545e1d70e05f86c2ef1de6fd812cecc5bc7c4b0f297d6ba707f4d0d6c9675"} Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.785294 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.789373 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerStarted","Data":"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"} Jan 27 12:49:25 crc kubenswrapper[4900]: E0127 12:49:25.807599 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="c76c04d4-a881-4504-a00f-3b227187edfa" Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.832966 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" podStartSLOduration=4.756137556 podStartE2EDuration="46.832942743s" podCreationTimestamp="2026-01-27 12:48:39 +0000 UTC" firstStartedPulling="2026-01-27 12:48:40.545154631 +0000 UTC m=+1347.782182841" lastFinishedPulling="2026-01-27 12:49:22.621959818 +0000 UTC m=+1389.858988028" observedRunningTime="2026-01-27 12:49:25.823153147 +0000 UTC m=+1393.060181377" watchObservedRunningTime="2026-01-27 12:49:25.832942743 +0000 UTC m=+1393.069970953" Jan 27 12:49:25 crc kubenswrapper[4900]: I0127 12:49:25.853091 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" podStartSLOduration=5.673864782 podStartE2EDuration="47.853045479s" podCreationTimestamp="2026-01-27 12:48:38 +0000 UTC" firstStartedPulling="2026-01-27 12:48:40.423015495 +0000 UTC m=+1347.660043705" lastFinishedPulling="2026-01-27 12:49:22.602196192 +0000 UTC m=+1389.839224402" observedRunningTime="2026-01-27 12:49:25.847471416 +0000 UTC m=+1393.084499626" watchObservedRunningTime="2026-01-27 12:49:25.853045479 +0000 UTC m=+1393.090073689" Jan 27 12:49:26 crc kubenswrapper[4900]: I0127 12:49:26.820372 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t4b4c" event={"ID":"029e8969-d08d-4909-9409-33f888c56c8c","Type":"ContainerStarted","Data":"bb290e12e20b7d258a5164531248596e09021134a54e72a99c807b4444a95e60"} Jan 27 12:49:26 crc kubenswrapper[4900]: I0127 12:49:26.821268 4900 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:49:26 crc kubenswrapper[4900]: I0127 12:49:26.821286 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t4b4c" event={"ID":"029e8969-d08d-4909-9409-33f888c56c8c","Type":"ContainerStarted","Data":"92074e3e2e03a2d0d2813bd8d5ad87e51803a0c6860c786aa73f77091ae5c58a"} Jan 27 12:49:26 crc kubenswrapper[4900]: I0127 12:49:26.821306 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:49:26 crc kubenswrapper[4900]: I0127 12:49:26.855556 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-t4b4c" podStartSLOduration=28.049969394 podStartE2EDuration="39.855533713s" podCreationTimestamp="2026-01-27 12:48:47 +0000 UTC" firstStartedPulling="2026-01-27 12:49:07.331768568 +0000 UTC m=+1374.568796778" lastFinishedPulling="2026-01-27 12:49:19.137332897 +0000 UTC m=+1386.374361097" observedRunningTime="2026-01-27 12:49:26.848503158 +0000 UTC m=+1394.085531368" watchObservedRunningTime="2026-01-27 12:49:26.855533713 +0000 UTC m=+1394.092561923" Jan 27 12:49:27 crc kubenswrapper[4900]: I0127 12:49:27.543328 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 27 12:49:27 crc kubenswrapper[4900]: I0127 12:49:27.823985 4900 generic.go:334] "Generic (PLEG): container finished" podID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerID="f4a879f19775cac2970a7d89376884e19d39318c4633401bf93040dfe62be007" exitCode=0 Jan 27 12:49:27 crc kubenswrapper[4900]: I0127 12:49:27.824236 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"eca1d592-3310-47ed-a815-8f32bc974d9b","Type":"ContainerDied","Data":"f4a879f19775cac2970a7d89376884e19d39318c4633401bf93040dfe62be007"} Jan 27 12:49:27 crc kubenswrapper[4900]: I0127 12:49:27.830110 4900 generic.go:334] "Generic (PLEG): container finished" podID="7626ad91-9f29-4dae-969a-e23d420319ac" containerID="bb4fe9c9382999e365d16e63f4daef8b6d615db47ad22e63db217db1a5e3b2f7" exitCode=0 Jan 27 12:49:27 crc kubenswrapper[4900]: I0127 12:49:27.830228 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7626ad91-9f29-4dae-969a-e23d420319ac","Type":"ContainerDied","Data":"bb4fe9c9382999e365d16e63f4daef8b6d615db47ad22e63db217db1a5e3b2f7"} Jan 27 12:49:28 crc kubenswrapper[4900]: I0127 12:49:28.590757 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 27 12:49:28 crc kubenswrapper[4900]: I0127 12:49:28.757473 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 27 12:49:28 crc kubenswrapper[4900]: I0127 12:49:28.844115 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"eca1d592-3310-47ed-a815-8f32bc974d9b","Type":"ContainerStarted","Data":"dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402"} Jan 27 12:49:28 crc kubenswrapper[4900]: I0127 12:49:28.846411 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7626ad91-9f29-4dae-969a-e23d420319ac","Type":"ContainerStarted","Data":"6fdd71884020c539bd9777c35cbe22887618ed748b95b5f0a35e3375e433dbda"} Jan 27 12:49:28 crc kubenswrapper[4900]: I0127 12:49:28.881029 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/openstack-cell1-galera-0" podStartSLOduration=35.690033946 podStartE2EDuration="46.881003908s" podCreationTimestamp="2026-01-27 12:48:42 +0000 UTC" firstStartedPulling="2026-01-27 12:49:07.605099036 +0000 UTC m=+1374.842127246" lastFinishedPulling="2026-01-27 12:49:18.796068998 +0000 UTC m=+1386.033097208" observedRunningTime="2026-01-27 12:49:28.873661154 +0000 UTC m=+1396.110689364" watchObservedRunningTime="2026-01-27 12:49:28.881003908 +0000 UTC m=+1396.118032118" Jan 27 12:49:28 crc kubenswrapper[4900]: I0127 12:49:28.902394 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 27 12:49:28 crc kubenswrapper[4900]: I0127 12:49:28.922825 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=34.934971952 podStartE2EDuration="48.922797466s" podCreationTimestamp="2026-01-27 12:48:40 +0000 UTC" firstStartedPulling="2026-01-27 12:49:03.970509183 +0000 UTC m=+1371.207537393" lastFinishedPulling="2026-01-27 12:49:17.958334697 +0000 UTC m=+1385.195362907" observedRunningTime="2026-01-27 12:49:28.918144 +0000 UTC m=+1396.155172230" watchObservedRunningTime="2026-01-27 12:49:28.922797466 +0000 UTC m=+1396.159825676" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.228541 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-f9tqz"] Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.228775 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" containerName="dnsmasq-dns" containerID="cri-o://ba08e6489fce6af9cde5b167a1012814965c96141fe01dcb7bbdb6a39ca0fd88" gracePeriod=10 Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.287497 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-hcsbg"] Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.289810 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.294892 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.302104 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-4stm8"] Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.303495 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.312102 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.314280 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-4stm8"] Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.323984 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-hcsbg"] Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.450873 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e98cfca-ae2e-4650-a9e8-e21f215546cb-config\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451255 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0e98cfca-ae2e-4650-a9e8-e21f215546cb-ovs-rundir\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451277 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0e98cfca-ae2e-4650-a9e8-e21f215546cb-ovn-rundir\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451296 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e98cfca-ae2e-4650-a9e8-e21f215546cb-combined-ca-bundle\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451340 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwdzk\" (UniqueName: \"kubernetes.io/projected/0e98cfca-ae2e-4650-a9e8-e21f215546cb-kube-api-access-vwdzk\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451395 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451417 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfq9s\" (UniqueName: \"kubernetes.io/projected/30eb2e94-9007-40f0-84cb-0da33a4aacc8-kube-api-access-jfq9s\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451443 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-config\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451475 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.451546 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e98cfca-ae2e-4650-a9e8-e21f215546cb-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584548 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e98cfca-ae2e-4650-a9e8-e21f215546cb-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584646 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e98cfca-ae2e-4650-a9e8-e21f215546cb-config\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584712 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0e98cfca-ae2e-4650-a9e8-e21f215546cb-ovs-rundir\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584732 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0e98cfca-ae2e-4650-a9e8-e21f215546cb-ovn-rundir\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584755 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e98cfca-ae2e-4650-a9e8-e21f215546cb-combined-ca-bundle\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584794 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwdzk\" (UniqueName: \"kubernetes.io/projected/0e98cfca-ae2e-4650-a9e8-e21f215546cb-kube-api-access-vwdzk\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584863 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584885 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfq9s\" (UniqueName: \"kubernetes.io/projected/30eb2e94-9007-40f0-84cb-0da33a4aacc8-kube-api-access-jfq9s\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584921 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-config\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.584959 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.586129 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0e98cfca-ae2e-4650-a9e8-e21f215546cb-ovs-rundir\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.586259 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0e98cfca-ae2e-4650-a9e8-e21f215546cb-ovn-rundir\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.587343 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e98cfca-ae2e-4650-a9e8-e21f215546cb-config\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.592622 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-config\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.596255 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.596659 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-dns-svc\") pod 
\"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.596879 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e98cfca-ae2e-4650-a9e8-e21f215546cb-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.611963 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfq9s\" (UniqueName: \"kubernetes.io/projected/30eb2e94-9007-40f0-84cb-0da33a4aacc8-kube-api-access-jfq9s\") pod \"dnsmasq-dns-7f896c8c65-hcsbg\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.627796 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e98cfca-ae2e-4650-a9e8-e21f215546cb-combined-ca-bundle\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.630209 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwdzk\" (UniqueName: \"kubernetes.io/projected/0e98cfca-ae2e-4650-a9e8-e21f215546cb-kube-api-access-vwdzk\") pod \"ovn-controller-metrics-4stm8\" (UID: \"0e98cfca-ae2e-4650-a9e8-e21f215546cb\") " pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.633921 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.683574 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wh4rj"] Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.683847 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" containerName="dnsmasq-dns" containerID="cri-o://963545e1d70e05f86c2ef1de6fd812cecc5bc7c4b0f297d6ba707f4d0d6c9675" gracePeriod=10 Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.715747 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-4stm8" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.752312 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-m4f5x"] Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.754979 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.775698 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.786196 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-m4f5x"] Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.828340 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4b9c\" (UniqueName: \"kubernetes.io/projected/596b98f0-2b51-4a1e-ae50-a16dc5097101-kube-api-access-m4b9c\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.828540 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.828598 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.828667 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-config\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.829039 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.913267 4900 generic.go:334] "Generic (PLEG): container finished" podID="d8dcc12a-564d-412b-b443-70036eab30a7" containerID="963545e1d70e05f86c2ef1de6fd812cecc5bc7c4b0f297d6ba707f4d0d6c9675" exitCode=0 Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.913334 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" event={"ID":"d8dcc12a-564d-412b-b443-70036eab30a7","Type":"ContainerDied","Data":"963545e1d70e05f86c2ef1de6fd812cecc5bc7c4b0f297d6ba707f4d0d6c9675"} Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.924936 4900 generic.go:334] "Generic (PLEG): container finished" podID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" containerID="ba08e6489fce6af9cde5b167a1012814965c96141fe01dcb7bbdb6a39ca0fd88" exitCode=0 Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.925038 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" 
event={"ID":"4d02f684-ed10-4d14-8a81-fea1453f4b2a","Type":"ContainerDied","Data":"ba08e6489fce6af9cde5b167a1012814965c96141fe01dcb7bbdb6a39ca0fd88"} Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.930272 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.930369 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4b9c\" (UniqueName: \"kubernetes.io/projected/596b98f0-2b51-4a1e-ae50-a16dc5097101-kube-api-access-m4b9c\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.930426 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.930450 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.930479 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-config\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.931606 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-config\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.932202 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.934144 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.934299 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " 
pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:29 crc kubenswrapper[4900]: I0127 12:49:29.950097 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4b9c\" (UniqueName: \"kubernetes.io/projected/596b98f0-2b51-4a1e-ae50-a16dc5097101-kube-api-access-m4b9c\") pod \"dnsmasq-dns-86db49b7ff-m4f5x\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.052863 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.057419 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.246114 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-dns-svc\") pod \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.246403 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghx8b\" (UniqueName: \"kubernetes.io/projected/4d02f684-ed10-4d14-8a81-fea1453f4b2a-kube-api-access-ghx8b\") pod \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.246526 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-config\") pod \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\" (UID: \"4d02f684-ed10-4d14-8a81-fea1453f4b2a\") " Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.271678 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d02f684-ed10-4d14-8a81-fea1453f4b2a-kube-api-access-ghx8b" (OuterVolumeSpecName: "kube-api-access-ghx8b") pod "4d02f684-ed10-4d14-8a81-fea1453f4b2a" (UID: "4d02f684-ed10-4d14-8a81-fea1453f4b2a"). InnerVolumeSpecName "kube-api-access-ghx8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.333030 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4d02f684-ed10-4d14-8a81-fea1453f4b2a" (UID: "4d02f684-ed10-4d14-8a81-fea1453f4b2a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.351885 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghx8b\" (UniqueName: \"kubernetes.io/projected/4d02f684-ed10-4d14-8a81-fea1453f4b2a-kube-api-access-ghx8b\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.351941 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.392233 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-hcsbg"] Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.420531 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-config" (OuterVolumeSpecName: "config") pod "4d02f684-ed10-4d14-8a81-fea1453f4b2a" (UID: "4d02f684-ed10-4d14-8a81-fea1453f4b2a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.458938 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d02f684-ed10-4d14-8a81-fea1453f4b2a-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.665474 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.778088 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-dns-svc\") pod \"d8dcc12a-564d-412b-b443-70036eab30a7\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.778617 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-config\") pod \"d8dcc12a-564d-412b-b443-70036eab30a7\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.778704 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjm9f\" (UniqueName: \"kubernetes.io/projected/d8dcc12a-564d-412b-b443-70036eab30a7-kube-api-access-jjm9f\") pod \"d8dcc12a-564d-412b-b443-70036eab30a7\" (UID: \"d8dcc12a-564d-412b-b443-70036eab30a7\") " Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.793447 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8dcc12a-564d-412b-b443-70036eab30a7-kube-api-access-jjm9f" (OuterVolumeSpecName: "kube-api-access-jjm9f") pod "d8dcc12a-564d-412b-b443-70036eab30a7" (UID: "d8dcc12a-564d-412b-b443-70036eab30a7"). InnerVolumeSpecName "kube-api-access-jjm9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.878804 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d8dcc12a-564d-412b-b443-70036eab30a7" (UID: "d8dcc12a-564d-412b-b443-70036eab30a7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.894703 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-config" (OuterVolumeSpecName: "config") pod "d8dcc12a-564d-412b-b443-70036eab30a7" (UID: "d8dcc12a-564d-412b-b443-70036eab30a7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.908423 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.908494 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8dcc12a-564d-412b-b443-70036eab30a7-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.908506 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjm9f\" (UniqueName: \"kubernetes.io/projected/d8dcc12a-564d-412b-b443-70036eab30a7-kube-api-access-jjm9f\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.934555 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-4stm8"] Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.979113 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" event={"ID":"30eb2e94-9007-40f0-84cb-0da33a4aacc8","Type":"ContainerStarted","Data":"e69891806eab9346dfedcfd946316ca51e776a29500147249cee17b19d6075fd"} Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.994960 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" event={"ID":"4d02f684-ed10-4d14-8a81-fea1453f4b2a","Type":"ContainerDied","Data":"b4ca81df630b7e4fcd5538d8b4c0eac766af730cafeede9658244daf6dfb64fb"} Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.995027 4900 scope.go:117] "RemoveContainer" containerID="ba08e6489fce6af9cde5b167a1012814965c96141fe01dcb7bbdb6a39ca0fd88" Jan 27 12:49:30 crc kubenswrapper[4900]: I0127 12:49:30.995170 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-f9tqz" Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.000551 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.000527 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wh4rj" event={"ID":"d8dcc12a-564d-412b-b443-70036eab30a7","Type":"ContainerDied","Data":"eda0c825ddee17b68a706e39edbce2571083d7032cfee5736b42823d830a8ba5"} Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.040321 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-f9tqz"] Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.067464 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-f9tqz"] Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.079757 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wh4rj"] Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.091316 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wh4rj"] Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.102660 4900 scope.go:117] "RemoveContainer" containerID="b5e9c10b40cc33b89dd99e59e1db54083b63986c31770c97f91a392e10f311f2" Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.136741 4900 scope.go:117] "RemoveContainer" containerID="963545e1d70e05f86c2ef1de6fd812cecc5bc7c4b0f297d6ba707f4d0d6c9675" Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.184364 4900 scope.go:117] "RemoveContainer" containerID="6eac136f7d86675e5a72e914d0bcd57ef768fb9954ef16dd6c7058d88357ee49" Jan 27 12:49:31 crc kubenswrapper[4900]: I0127 12:49:31.196959 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-m4f5x"] Jan 27 12:49:31 crc kubenswrapper[4900]: W0127 12:49:31.197474 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod596b98f0_2b51_4a1e_ae50_a16dc5097101.slice/crio-c983f102fc9a049979d33b1885451c1fce029e500f7589b6a082b110987b5357 WatchSource:0}: Error finding container c983f102fc9a049979d33b1885451c1fce029e500f7589b6a082b110987b5357: Status 404 returned error can't find the container with id c983f102fc9a049979d33b1885451c1fce029e500f7589b6a082b110987b5357 Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.034350 4900 generic.go:334] "Generic (PLEG): container finished" podID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" containerID="ba28881955eb9fe37b8ed286b756eb94257a6d8f8e6a231ab2a91f5523b3b981" exitCode=0 Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.034463 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" event={"ID":"30eb2e94-9007-40f0-84cb-0da33a4aacc8","Type":"ContainerDied","Data":"ba28881955eb9fe37b8ed286b756eb94257a6d8f8e6a231ab2a91f5523b3b981"} Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.039231 4900 generic.go:334] "Generic (PLEG): container finished" podID="2537092f-2211-4329-afe3-1e15bdd14256" containerID="f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a" exitCode=0 Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.039308 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerDied","Data":"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"} Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.062163 4900 generic.go:334] "Generic (PLEG): container finished" 
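
One W-level record above stands out: manager.go:1169 fails to process a cgroup watch event for the new dnsmasq-dns-86db49b7ff-m4f5x sandbox because the lookup for the crio-c983f... container id returns 404. This is most likely the usual benign startup race, in which cAdvisor's cgroup watcher sees the directory appear before the runtime has registered the container; the ContainerStarted events that follow for the same id succeed. A tolerant handler for that case might look like the following (a sketch under that assumption, not cAdvisor's code):

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("Status 404 returned error can't find the container")

    func lookup(id string, registered map[string]bool) error {
        if !registered[id] {
            return errNotFound
        }
        return nil
    }

    // processWatchEvent drops not-yet-registered containers instead of failing;
    // a later housekeeping pass retries once the runtime knows about the id.
    func processWatchEvent(id string, registered map[string]bool) {
        if err := lookup(id, registered); errors.Is(err, errNotFound) {
            fmt.Printf("W Failed to process watch event for %s: %v\n", id, err)
            return
        }
        fmt.Printf("I now watching container %s\n", id)
    }

    func main() {
        registered := map[string]bool{}
        processWatchEvent("c983f102fc9a", registered) // too early: 404, dropped
        registered["c983f102fc9a"] = true
        processWatchEvent("c983f102fc9a", registered) // retried later: succeeds
    }
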
podID="596b98f0-2b51-4a1e-ae50-a16dc5097101" containerID="0fb6d5d4722cfcdbdb4c63bedd7e5b419ade21989ab569b6265732e123b56530" exitCode=0 Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.070882 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" event={"ID":"596b98f0-2b51-4a1e-ae50-a16dc5097101","Type":"ContainerDied","Data":"0fb6d5d4722cfcdbdb4c63bedd7e5b419ade21989ab569b6265732e123b56530"} Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.070945 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" event={"ID":"596b98f0-2b51-4a1e-ae50-a16dc5097101","Type":"ContainerStarted","Data":"c983f102fc9a049979d33b1885451c1fce029e500f7589b6a082b110987b5357"} Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.076461 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-4stm8" event={"ID":"0e98cfca-ae2e-4650-a9e8-e21f215546cb","Type":"ContainerStarted","Data":"c30c2fdd9e1022d3aaf906160203c19d5e482241441082f48ee7ef5e565e07f1"} Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.076576 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-4stm8" event={"ID":"0e98cfca-ae2e-4650-a9e8-e21f215546cb","Type":"ContainerStarted","Data":"920aeb4a3cf4931c4177f69dde584feb0809b489951db41994c51bd46bf8a208"} Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.180538 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-4stm8" podStartSLOduration=3.180505402 podStartE2EDuration="3.180505402s" podCreationTimestamp="2026-01-27 12:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:32.162003003 +0000 UTC m=+1399.399031213" watchObservedRunningTime="2026-01-27 12:49:32.180505402 +0000 UTC m=+1399.417533612" Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.264971 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.265082 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.493310 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" path="/var/lib/kubelet/pods/4d02f684-ed10-4d14-8a81-fea1453f4b2a/volumes" Jan 27 12:49:32 crc kubenswrapper[4900]: I0127 12:49:32.494267 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" path="/var/lib/kubelet/pods/d8dcc12a-564d-412b-b443-70036eab30a7/volumes" Jan 27 12:49:33 crc kubenswrapper[4900]: I0127 12:49:33.097424 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" event={"ID":"30eb2e94-9007-40f0-84cb-0da33a4aacc8","Type":"ContainerStarted","Data":"873c792865dea7d3d2ae520e368f7decead169bb6d8d0c770a3c184dd7d42a2c"} Jan 27 12:49:33 crc kubenswrapper[4900]: I0127 12:49:33.098690 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:33 crc kubenswrapper[4900]: I0127 12:49:33.100490 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" 
event={"ID":"596b98f0-2b51-4a1e-ae50-a16dc5097101","Type":"ContainerStarted","Data":"0e5f72475e6f7ad448b7dce1c86db1d882f65c3df7e5ae151d5a54cf5a61fb1d"} Jan 27 12:49:33 crc kubenswrapper[4900]: I0127 12:49:33.120729 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" podStartSLOduration=4.120703431 podStartE2EDuration="4.120703431s" podCreationTimestamp="2026-01-27 12:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:33.117513698 +0000 UTC m=+1400.354541908" watchObservedRunningTime="2026-01-27 12:49:33.120703431 +0000 UTC m=+1400.357731641" Jan 27 12:49:33 crc kubenswrapper[4900]: I0127 12:49:33.138387 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" podStartSLOduration=4.138356085 podStartE2EDuration="4.138356085s" podCreationTimestamp="2026-01-27 12:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:33.135870383 +0000 UTC m=+1400.372898603" watchObservedRunningTime="2026-01-27 12:49:33.138356085 +0000 UTC m=+1400.375384295" Jan 27 12:49:33 crc kubenswrapper[4900]: I0127 12:49:33.722544 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 27 12:49:33 crc kubenswrapper[4900]: I0127 12:49:33.722613 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 27 12:49:33 crc kubenswrapper[4900]: I0127 12:49:33.791429 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5p78" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" probeResult="failure" output=< Jan 27 12:49:33 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:49:33 crc kubenswrapper[4900]: > Jan 27 12:49:34 crc kubenswrapper[4900]: I0127 12:49:34.117912 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:34 crc kubenswrapper[4900]: I0127 12:49:34.643922 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 27 12:49:34 crc kubenswrapper[4900]: I0127 12:49:34.756252 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.535294 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-hsfkh"] Jan 27 12:49:35 crc kubenswrapper[4900]: E0127 12:49:35.535832 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" containerName="init" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.535855 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" containerName="init" Jan 27 12:49:35 crc kubenswrapper[4900]: E0127 12:49:35.535877 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" containerName="dnsmasq-dns" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.535886 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" containerName="dnsmasq-dns" Jan 27 12:49:35 crc 
kubenswrapper[4900]: E0127 12:49:35.535902 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" containerName="init" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.535909 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" containerName="init" Jan 27 12:49:35 crc kubenswrapper[4900]: E0127 12:49:35.535934 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" containerName="dnsmasq-dns" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.535941 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" containerName="dnsmasq-dns" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.536210 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d02f684-ed10-4d14-8a81-fea1453f4b2a" containerName="dnsmasq-dns" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.536243 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8dcc12a-564d-412b-b443-70036eab30a7" containerName="dnsmasq-dns" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.537238 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.562746 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-hsfkh"] Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.643910 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-hcsbg"] Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.650683 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77dvk\" (UniqueName: \"kubernetes.io/projected/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-kube-api-access-77dvk\") pod \"mysqld-exporter-openstack-db-create-hsfkh\" (UID: \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\") " pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.650804 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-hsfkh\" (UID: \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\") " pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.655868 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.700207 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-22af-account-create-update-5nw2h"] Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.701928 4900 util.go:30] "No sandbox for pod can be found. 
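
The paired E/I records above are the CPU and memory managers garbage-collecting assignments left behind by the deleted dnsmasq pods: admission of the new mysqld-exporter pod triggers RemoveStaleState, each stale container is logged at error level, and its CPUSet assignment is deleted. The sweep amounts to dropping map entries whose pod UID is no longer active (illustrative types, not the kubelet's state package):

    package main

    import "fmt"

    type key struct{ podUID, container string }

    // removeStaleState deletes assignments for pods that no longer exist,
    // echoing the RemoveStaleState / "Deleted CPUSet assignment" pairs above.
    func removeStaleState(assignments map[key][]int, active map[string]bool) {
        for k := range assignments {
            if !active[k.podUID] {
                fmt.Printf("RemoveStaleState: removing container podUID=%s name=%s\n", k.podUID, k.container)
                delete(assignments, k)
            }
        }
    }

    func main() {
        assignments := map[key][]int{
            {"4d02f684", "dnsmasq-dns"}: {2, 3},
            {"4d02f684", "init"}:        {2},
            {"d8dcc12a", "dnsmasq-dns"}: {4},
        }
        removeStaleState(assignments, map[string]bool{}) // neither old pod is active
        fmt.Println("remaining:", len(assignments))      // remaining: 0
    }
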
Need to start a new one" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.711346 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.720592 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-z8pb8"] Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.722994 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.745156 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-22af-account-create-update-5nw2h"] Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.752689 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77dvk\" (UniqueName: \"kubernetes.io/projected/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-kube-api-access-77dvk\") pod \"mysqld-exporter-openstack-db-create-hsfkh\" (UID: \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\") " pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.752827 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-hsfkh\" (UID: \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\") " pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.753660 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-hsfkh\" (UID: \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\") " pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.787688 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77dvk\" (UniqueName: \"kubernetes.io/projected/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-kube-api-access-77dvk\") pod \"mysqld-exporter-openstack-db-create-hsfkh\" (UID: \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\") " pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.798617 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-z8pb8"] Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.856545 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-config\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.856762 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14e50e4-6c8c-4eac-af5d-3726019acdc9-operator-scripts\") pod \"mysqld-exporter-22af-account-create-update-5nw2h\" (UID: \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\") " pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.856805 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-dns-svc\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.856864 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.856915 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.857025 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2xlc\" (UniqueName: \"kubernetes.io/projected/d14e50e4-6c8c-4eac-af5d-3726019acdc9-kube-api-access-n2xlc\") pod \"mysqld-exporter-22af-account-create-update-5nw2h\" (UID: \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\") " pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.857108 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phtbq\" (UniqueName: \"kubernetes.io/projected/67c4919e-b3dc-47ea-8728-03d0aaf07c18-kube-api-access-phtbq\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.878991 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.958997 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.959340 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.959413 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2xlc\" (UniqueName: \"kubernetes.io/projected/d14e50e4-6c8c-4eac-af5d-3726019acdc9-kube-api-access-n2xlc\") pod \"mysqld-exporter-22af-account-create-update-5nw2h\" (UID: \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\") " pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.959438 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phtbq\" (UniqueName: \"kubernetes.io/projected/67c4919e-b3dc-47ea-8728-03d0aaf07c18-kube-api-access-phtbq\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.959507 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-config\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.959594 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14e50e4-6c8c-4eac-af5d-3726019acdc9-operator-scripts\") pod \"mysqld-exporter-22af-account-create-update-5nw2h\" (UID: \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\") " pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.959628 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-dns-svc\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.960858 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-config\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.960973 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: 
\"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.961159 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14e50e4-6c8c-4eac-af5d-3726019acdc9-operator-scripts\") pod \"mysqld-exporter-22af-account-create-update-5nw2h\" (UID: \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\") " pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.961579 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.961782 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-dns-svc\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.986477 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phtbq\" (UniqueName: \"kubernetes.io/projected/67c4919e-b3dc-47ea-8728-03d0aaf07c18-kube-api-access-phtbq\") pod \"dnsmasq-dns-698758b865-z8pb8\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") " pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:35 crc kubenswrapper[4900]: I0127 12:49:35.989560 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2xlc\" (UniqueName: \"kubernetes.io/projected/d14e50e4-6c8c-4eac-af5d-3726019acdc9-kube-api-access-n2xlc\") pod \"mysqld-exporter-22af-account-create-update-5nw2h\" (UID: \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\") " pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:36 crc kubenswrapper[4900]: I0127 12:49:36.036995 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:36 crc kubenswrapper[4900]: I0127 12:49:36.077403 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:36 crc kubenswrapper[4900]: I0127 12:49:36.147013 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" podUID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" containerName="dnsmasq-dns" containerID="cri-o://873c792865dea7d3d2ae520e368f7decead169bb6d8d0c770a3c184dd7d42a2c" gracePeriod=10 Jan 27 12:49:36 crc kubenswrapper[4900]: I0127 12:49:36.531584 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-hsfkh"] Jan 27 12:49:36 crc kubenswrapper[4900]: I0127 12:49:36.681147 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-22af-account-create-update-5nw2h"] Jan 27 12:49:36 crc kubenswrapper[4900]: I0127 12:49:36.773219 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-z8pb8"] Jan 27 12:49:36 crc kubenswrapper[4900]: I0127 12:49:36.915926 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.073609 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.073905 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.074796 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.084297 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-4b2dg" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.084435 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.084600 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.084713 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.187142 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" event={"ID":"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a","Type":"ContainerStarted","Data":"7b16b8a1604759c0d13e20a01ccbe3eeb3c8dae9c0b1e0ea584db50804ef643b"} Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.190467 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" event={"ID":"d14e50e4-6c8c-4eac-af5d-3726019acdc9","Type":"ContainerStarted","Data":"d6b94a2b8aec292bdfce17d45609e8c709df6ff8422ef74fab902a4cbbf6fd54"} Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.199869 4900 generic.go:334] "Generic (PLEG): container finished" podID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" containerID="873c792865dea7d3d2ae520e368f7decead169bb6d8d0c770a3c184dd7d42a2c" exitCode=0 Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.199994 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" event={"ID":"30eb2e94-9007-40f0-84cb-0da33a4aacc8","Type":"ContainerDied","Data":"873c792865dea7d3d2ae520e368f7decead169bb6d8d0c770a3c184dd7d42a2c"} Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 
12:49:37.211842 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-z8pb8" event={"ID":"67c4919e-b3dc-47ea-8728-03d0aaf07c18","Type":"ContainerStarted","Data":"880ba9dfb2dee180901fcb107a8eb785b54a213ba94f79df835d0f643abf9c45"} Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.225584 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.256000 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/0c2f90a4-baa0-4eeb-a797-3664c306818b-cache\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.256263 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-eb138aa1-c21a-44f7-9bef-ae6dcaca8ce2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-eb138aa1-c21a-44f7-9bef-ae6dcaca8ce2\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.256337 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bk4m9\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-kube-api-access-bk4m9\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.256363 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/0c2f90a4-baa0-4eeb-a797-3664c306818b-lock\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.256464 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c2f90a4-baa0-4eeb-a797-3664c306818b-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.263202 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.365275 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk4m9\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-kube-api-access-bk4m9\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.366376 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/0c2f90a4-baa0-4eeb-a797-3664c306818b-lock\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.366506 4900 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c2f90a4-baa0-4eeb-a797-3664c306818b-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.366543 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: E0127 12:49:37.366991 4900 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 12:49:37 crc kubenswrapper[4900]: E0127 12:49:37.367023 4900 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 12:49:37 crc kubenswrapper[4900]: E0127 12:49:37.367133 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift podName:0c2f90a4-baa0-4eeb-a797-3664c306818b nodeName:}" failed. No retries permitted until 2026-01-27 12:49:37.867097089 +0000 UTC m=+1405.104125299 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift") pod "swift-storage-0" (UID: "0c2f90a4-baa0-4eeb-a797-3664c306818b") : configmap "swift-ring-files" not found Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.368841 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/0c2f90a4-baa0-4eeb-a797-3664c306818b-lock\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.370405 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/0c2f90a4-baa0-4eeb-a797-3664c306818b-cache\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.370757 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-eb138aa1-c21a-44f7-9bef-ae6dcaca8ce2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-eb138aa1-c21a-44f7-9bef-ae6dcaca8ce2\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.371694 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/0c2f90a4-baa0-4eeb-a797-3664c306818b-cache\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.372070 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c2f90a4-baa0-4eeb-a797-3664c306818b-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.381978 4900 csi_attacher.go:380] 
kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.382041 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-eb138aa1-c21a-44f7-9bef-ae6dcaca8ce2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-eb138aa1-c21a-44f7-9bef-ae6dcaca8ce2\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9ce5f74529361d4ca5b599fe4b81d378c2348e45eeb4990bed0156ce3c457034/globalmount\"" pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.387492 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk4m9\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-kube-api-access-bk4m9\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.490122 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-eb138aa1-c21a-44f7-9bef-ae6dcaca8ce2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-eb138aa1-c21a-44f7-9bef-ae6dcaca8ce2\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:37 crc kubenswrapper[4900]: E0127 12:49:37.902324 4900 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 12:49:37 crc kubenswrapper[4900]: E0127 12:49:37.902363 4900 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 12:49:37 crc kubenswrapper[4900]: E0127 12:49:37.902439 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift podName:0c2f90a4-baa0-4eeb-a797-3664c306818b nodeName:}" failed. No retries permitted until 2026-01-27 12:49:38.902418733 +0000 UTC m=+1406.139446943 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift") pod "swift-storage-0" (UID: "0c2f90a4-baa0-4eeb-a797-3664c306818b") : configmap "swift-ring-files" not found Jan 27 12:49:37 crc kubenswrapper[4900]: I0127 12:49:37.902141 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.016280 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.107217 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-ovsdbserver-sb\") pod \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.107930 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-dns-svc\") pod \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.107962 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-config\") pod \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.108074 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfq9s\" (UniqueName: \"kubernetes.io/projected/30eb2e94-9007-40f0-84cb-0da33a4aacc8-kube-api-access-jfq9s\") pod \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\" (UID: \"30eb2e94-9007-40f0-84cb-0da33a4aacc8\") " Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.115800 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30eb2e94-9007-40f0-84cb-0da33a4aacc8-kube-api-access-jfq9s" (OuterVolumeSpecName: "kube-api-access-jfq9s") pod "30eb2e94-9007-40f0-84cb-0da33a4aacc8" (UID: "30eb2e94-9007-40f0-84cb-0da33a4aacc8"). InnerVolumeSpecName "kube-api-access-jfq9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.177486 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-config" (OuterVolumeSpecName: "config") pod "30eb2e94-9007-40f0-84cb-0da33a4aacc8" (UID: "30eb2e94-9007-40f0-84cb-0da33a4aacc8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.180419 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "30eb2e94-9007-40f0-84cb-0da33a4aacc8" (UID: "30eb2e94-9007-40f0-84cb-0da33a4aacc8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.193216 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "30eb2e94-9007-40f0-84cb-0da33a4aacc8" (UID: "30eb2e94-9007-40f0-84cb-0da33a4aacc8"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.216098 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.216184 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.216199 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfq9s\" (UniqueName: \"kubernetes.io/projected/30eb2e94-9007-40f0-84cb-0da33a4aacc8-kube-api-access-jfq9s\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.216212 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/30eb2e94-9007-40f0-84cb-0da33a4aacc8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.225751 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" event={"ID":"30eb2e94-9007-40f0-84cb-0da33a4aacc8","Type":"ContainerDied","Data":"e69891806eab9346dfedcfd946316ca51e776a29500147249cee17b19d6075fd"} Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.225835 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-hcsbg" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.225844 4900 scope.go:117] "RemoveContainer" containerID="873c792865dea7d3d2ae520e368f7decead169bb6d8d0c770a3c184dd7d42a2c" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.273111 4900 scope.go:117] "RemoveContainer" containerID="ba28881955eb9fe37b8ed286b756eb94257a6d8f8e6a231ab2a91f5523b3b981" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.286353 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-hcsbg"] Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.306045 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-hcsbg"] Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.494937 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" path="/var/lib/kubelet/pods/30eb2e94-9007-40f0-84cb-0da33a4aacc8/volumes" Jan 27 12:49:38 crc kubenswrapper[4900]: I0127 12:49:38.938164 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:38 crc kubenswrapper[4900]: E0127 12:49:38.938510 4900 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 12:49:38 crc kubenswrapper[4900]: E0127 12:49:38.938548 4900 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 12:49:38 crc kubenswrapper[4900]: E0127 12:49:38.938612 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift podName:0c2f90a4-baa0-4eeb-a797-3664c306818b nodeName:}" failed. 
No retries permitted until 2026-01-27 12:49:40.938589269 +0000 UTC m=+1408.175617469 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift") pod "swift-storage-0" (UID: "0c2f90a4-baa0-4eeb-a797-3664c306818b") : configmap "swift-ring-files" not found Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.008146 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-2tkfw"] Jan 27 12:49:39 crc kubenswrapper[4900]: E0127 12:49:39.008628 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" containerName="dnsmasq-dns" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.008654 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" containerName="dnsmasq-dns" Jan 27 12:49:39 crc kubenswrapper[4900]: E0127 12:49:39.008691 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" containerName="init" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.008703 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" containerName="init" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.008884 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="30eb2e94-9007-40f0-84cb-0da33a4aacc8" containerName="dnsmasq-dns" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.009668 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.022473 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2tkfw"] Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.120704 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-7e73-account-create-update-6fnzb"] Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.122647 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.125108 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.143546 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa278afe-1535-4c32-af1f-840d8df9dbe5-operator-scripts\") pod \"glance-db-create-2tkfw\" (UID: \"aa278afe-1535-4c32-af1f-840d8df9dbe5\") " pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.143590 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vp79\" (UniqueName: \"kubernetes.io/projected/aa278afe-1535-4c32-af1f-840d8df9dbe5-kube-api-access-5vp79\") pod \"glance-db-create-2tkfw\" (UID: \"aa278afe-1535-4c32-af1f-840d8df9dbe5\") " pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.140769 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-7e73-account-create-update-6fnzb"] Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.247544 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vp79\" (UniqueName: \"kubernetes.io/projected/aa278afe-1535-4c32-af1f-840d8df9dbe5-kube-api-access-5vp79\") pod \"glance-db-create-2tkfw\" (UID: \"aa278afe-1535-4c32-af1f-840d8df9dbe5\") " pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.247691 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02763fa3-4db9-4750-ba18-7a434e9cd831-operator-scripts\") pod \"glance-7e73-account-create-update-6fnzb\" (UID: \"02763fa3-4db9-4750-ba18-7a434e9cd831\") " pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.247743 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm87p\" (UniqueName: \"kubernetes.io/projected/02763fa3-4db9-4750-ba18-7a434e9cd831-kube-api-access-pm87p\") pod \"glance-7e73-account-create-update-6fnzb\" (UID: \"02763fa3-4db9-4750-ba18-7a434e9cd831\") " pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.248176 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa278afe-1535-4c32-af1f-840d8df9dbe5-operator-scripts\") pod \"glance-db-create-2tkfw\" (UID: \"aa278afe-1535-4c32-af1f-840d8df9dbe5\") " pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.248941 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa278afe-1535-4c32-af1f-840d8df9dbe5-operator-scripts\") pod \"glance-db-create-2tkfw\" (UID: \"aa278afe-1535-4c32-af1f-840d8df9dbe5\") " pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.267810 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vp79\" (UniqueName: \"kubernetes.io/projected/aa278afe-1535-4c32-af1f-840d8df9dbe5-kube-api-access-5vp79\") pod \"glance-db-create-2tkfw\" (UID: 
\"aa278afe-1535-4c32-af1f-840d8df9dbe5\") " pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.334005 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.350668 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02763fa3-4db9-4750-ba18-7a434e9cd831-operator-scripts\") pod \"glance-7e73-account-create-update-6fnzb\" (UID: \"02763fa3-4db9-4750-ba18-7a434e9cd831\") " pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.350748 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm87p\" (UniqueName: \"kubernetes.io/projected/02763fa3-4db9-4750-ba18-7a434e9cd831-kube-api-access-pm87p\") pod \"glance-7e73-account-create-update-6fnzb\" (UID: \"02763fa3-4db9-4750-ba18-7a434e9cd831\") " pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.363494 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02763fa3-4db9-4750-ba18-7a434e9cd831-operator-scripts\") pod \"glance-7e73-account-create-update-6fnzb\" (UID: \"02763fa3-4db9-4750-ba18-7a434e9cd831\") " pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.387865 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pm87p\" (UniqueName: \"kubernetes.io/projected/02763fa3-4db9-4750-ba18-7a434e9cd831-kube-api-access-pm87p\") pod \"glance-7e73-account-create-update-6fnzb\" (UID: \"02763fa3-4db9-4750-ba18-7a434e9cd831\") " pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:39 crc kubenswrapper[4900]: I0127 12:49:39.456661 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.017856 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2tkfw"] Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.056256 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.195431 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-7e73-account-create-update-6fnzb"] Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.258356 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2tkfw" event={"ID":"aa278afe-1535-4c32-af1f-840d8df9dbe5","Type":"ContainerStarted","Data":"6983eed52d7957e198c9ee3591a13fc7475eae8ec34bcbfc473b9279be24ad5f"} Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.267721 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" event={"ID":"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a","Type":"ContainerStarted","Data":"0b2d2d88e6d93572de7f994bb6020de9c67be42180ca0c184d0bda46a2044b58"} Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.273322 4900 generic.go:334] "Generic (PLEG): container finished" podID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerID="6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae" exitCode=0 Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.273408 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5","Type":"ContainerDied","Data":"6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae"} Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.288029 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" event={"ID":"d14e50e4-6c8c-4eac-af5d-3726019acdc9","Type":"ContainerStarted","Data":"49f14d4f9917671596f82ca771908d68402b2ffa5a4cf4fe043f31c03b8b8191"} Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.298730 4900 generic.go:334] "Generic (PLEG): container finished" podID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerID="6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b" exitCode=0 Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.298833 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4","Type":"ContainerDied","Data":"6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b"} Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.306515 4900 generic.go:334] "Generic (PLEG): container finished" podID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerID="4a96193197e852909fdf2ce6bdab6d9377bee4d8f7c787ddc670fdc082daea92" exitCode=0 Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.306602 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ca415683-2d53-4bdc-b9f7-c98610a65cc3","Type":"ContainerDied","Data":"4a96193197e852909fdf2ce6bdab6d9377bee4d8f7c787ddc670fdc082daea92"} Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.309861 4900 generic.go:334] "Generic (PLEG): container finished" podID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerID="07924ff35100f3b5d21779c1abba6f98705d9382bf52fb1b4961abf725123e2d" exitCode=0 Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 
12:49:40.309901 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-z8pb8" event={"ID":"67c4919e-b3dc-47ea-8728-03d0aaf07c18","Type":"ContainerDied","Data":"07924ff35100f3b5d21779c1abba6f98705d9382bf52fb1b4961abf725123e2d"} Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.312504 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" podStartSLOduration=5.312449669 podStartE2EDuration="5.312449669s" podCreationTimestamp="2026-01-27 12:49:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:40.286089251 +0000 UTC m=+1407.523117461" watchObservedRunningTime="2026-01-27 12:49:40.312449669 +0000 UTC m=+1407.549477889" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.390332 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" podStartSLOduration=5.390308649 podStartE2EDuration="5.390308649s" podCreationTimestamp="2026-01-27 12:49:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:40.379843974 +0000 UTC m=+1407.616872184" watchObservedRunningTime="2026-01-27 12:49:40.390308649 +0000 UTC m=+1407.627336859" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.713901 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-dzlzf"] Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.716499 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.720523 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.720807 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.721045 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.730663 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-dzlzf"] Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.796745 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-etc-swift\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.796805 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-dispersionconf\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.796842 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-scripts\") pod \"swift-ring-rebalance-dzlzf\" (UID: 
\"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.797273 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-swiftconf\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.797587 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-ring-data-devices\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.797689 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx2tc\" (UniqueName: \"kubernetes.io/projected/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-kube-api-access-hx2tc\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.797829 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-combined-ca-bundle\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.889943 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-qtmpr"] Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.892621 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.899952 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-ring-data-devices\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.900012 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx2tc\" (UniqueName: \"kubernetes.io/projected/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-kube-api-access-hx2tc\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.900438 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-combined-ca-bundle\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.900479 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-etc-swift\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.900511 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-dispersionconf\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.900549 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-scripts\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.900666 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-swiftconf\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.902073 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-scripts\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.902636 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.902669 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-qtmpr"] Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.904886 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-ring-data-devices\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.907449 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-combined-ca-bundle\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.913722 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-etc-swift\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.925663 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx2tc\" (UniqueName: \"kubernetes.io/projected/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-kube-api-access-hx2tc\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.935263 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-swiftconf\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:40 crc kubenswrapper[4900]: I0127 12:49:40.939389 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-dispersionconf\") pod \"swift-ring-rebalance-dzlzf\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") " pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.005808 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90bf0fe6-a714-419c-ab4a-46d0ece94652-operator-scripts\") pod \"root-account-create-update-qtmpr\" (UID: \"90bf0fe6-a714-419c-ab4a-46d0ece94652\") " pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.005969 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82wc7\" (UniqueName: \"kubernetes.io/projected/90bf0fe6-a714-419c-ab4a-46d0ece94652-kube-api-access-82wc7\") pod \"root-account-create-update-qtmpr\" (UID: \"90bf0fe6-a714-419c-ab4a-46d0ece94652\") " pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.006032 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:41 crc kubenswrapper[4900]: E0127 12:49:41.006247 4900 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap 
"swift-ring-files" not found Jan 27 12:49:41 crc kubenswrapper[4900]: E0127 12:49:41.006274 4900 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 12:49:41 crc kubenswrapper[4900]: E0127 12:49:41.006341 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift podName:0c2f90a4-baa0-4eeb-a797-3664c306818b nodeName:}" failed. No retries permitted until 2026-01-27 12:49:45.006318717 +0000 UTC m=+1412.243346927 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift") pod "swift-storage-0" (UID: "0c2f90a4-baa0-4eeb-a797-3664c306818b") : configmap "swift-ring-files" not found Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.108984 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90bf0fe6-a714-419c-ab4a-46d0ece94652-operator-scripts\") pod \"root-account-create-update-qtmpr\" (UID: \"90bf0fe6-a714-419c-ab4a-46d0ece94652\") " pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.109214 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82wc7\" (UniqueName: \"kubernetes.io/projected/90bf0fe6-a714-419c-ab4a-46d0ece94652-kube-api-access-82wc7\") pod \"root-account-create-update-qtmpr\" (UID: \"90bf0fe6-a714-419c-ab4a-46d0ece94652\") " pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.109770 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90bf0fe6-a714-419c-ab4a-46d0ece94652-operator-scripts\") pod \"root-account-create-update-qtmpr\" (UID: \"90bf0fe6-a714-419c-ab4a-46d0ece94652\") " pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.115797 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dzlzf" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.129804 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82wc7\" (UniqueName: \"kubernetes.io/projected/90bf0fe6-a714-419c-ab4a-46d0ece94652-kube-api-access-82wc7\") pod \"root-account-create-update-qtmpr\" (UID: \"90bf0fe6-a714-419c-ab4a-46d0ece94652\") " pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.325994 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.334798 4900 generic.go:334] "Generic (PLEG): container finished" podID="f2794095-3d03-4cf0-8e7b-ecc39fb3db7a" containerID="0b2d2d88e6d93572de7f994bb6020de9c67be42180ca0c184d0bda46a2044b58" exitCode=0 Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.334935 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" event={"ID":"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a","Type":"ContainerDied","Data":"0b2d2d88e6d93572de7f994bb6020de9c67be42180ca0c184d0bda46a2044b58"} Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.337873 4900 generic.go:334] "Generic (PLEG): container finished" podID="c4594c71-599f-4576-bf95-303da1436ca4" containerID="67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a" exitCode=0 Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.337995 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c4594c71-599f-4576-bf95-303da1436ca4","Type":"ContainerDied","Data":"67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a"} Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.342342 4900 generic.go:334] "Generic (PLEG): container finished" podID="d14e50e4-6c8c-4eac-af5d-3726019acdc9" containerID="49f14d4f9917671596f82ca771908d68402b2ffa5a4cf4fe043f31c03b8b8191" exitCode=0 Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.342439 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" event={"ID":"d14e50e4-6c8c-4eac-af5d-3726019acdc9","Type":"ContainerDied","Data":"49f14d4f9917671596f82ca771908d68402b2ffa5a4cf4fe043f31c03b8b8191"} Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.354461 4900 generic.go:334] "Generic (PLEG): container finished" podID="aa278afe-1535-4c32-af1f-840d8df9dbe5" containerID="0e841b955ac81d5af63736094f712f5c64b0834928d576c93f3b7daf3f5a6c6f" exitCode=0 Jan 27 12:49:41 crc kubenswrapper[4900]: I0127 12:49:41.354536 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2tkfw" event={"ID":"aa278afe-1535-4c32-af1f-840d8df9dbe5","Type":"ContainerDied","Data":"0e841b955ac81d5af63736094f712f5c64b0834928d576c93f3b7daf3f5a6c6f"} Jan 27 12:49:42 crc kubenswrapper[4900]: I0127 12:49:42.767248 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5d4d5db777-x4428" podUID="0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" containerName="console" containerID="cri-o://a126780cd6d5ae4eb121559d736cce47c465cf3ca3baff86838c544747a47a0e" gracePeriod=15 Jan 27 12:49:42 crc kubenswrapper[4900]: I0127 12:49:42.805008 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b5p78" Jan 27 12:49:42 crc kubenswrapper[4900]: I0127 12:49:42.865461 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b5p78" Jan 27 12:49:42 crc kubenswrapper[4900]: I0127 12:49:42.934226 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5p78"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.218167 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-w6s6w"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.220138 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.230917 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8qr9\" (UniqueName: \"kubernetes.io/projected/261d14b0-20d4-45ec-ab1d-fdd704a6630b-kube-api-access-j8qr9\") pod \"keystone-db-create-w6s6w\" (UID: \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\") " pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.231149 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/261d14b0-20d4-45ec-ab1d-fdd704a6630b-operator-scripts\") pod \"keystone-db-create-w6s6w\" (UID: \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\") " pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.235911 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-w6s6w"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.334133 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8qr9\" (UniqueName: \"kubernetes.io/projected/261d14b0-20d4-45ec-ab1d-fdd704a6630b-kube-api-access-j8qr9\") pod \"keystone-db-create-w6s6w\" (UID: \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\") " pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.334425 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/261d14b0-20d4-45ec-ab1d-fdd704a6630b-operator-scripts\") pod \"keystone-db-create-w6s6w\" (UID: \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\") " pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.335557 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/261d14b0-20d4-45ec-ab1d-fdd704a6630b-operator-scripts\") pod \"keystone-db-create-w6s6w\" (UID: \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\") " pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.335820 4900 patch_prober.go:28] interesting pod/console-5d4d5db777-x4428 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.89:8443/health\": dial tcp 10.217.0.89:8443: connect: connection refused" start-of-body= Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.335958 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-5d4d5db777-x4428" podUID="0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" containerName="console" probeResult="failure" output="Get \"https://10.217.0.89:8443/health\": dial tcp 10.217.0.89:8443: connect: connection refused" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.353647 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-1e0a-account-create-update-jft2t"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.355912 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.364092 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.370471 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8qr9\" (UniqueName: \"kubernetes.io/projected/261d14b0-20d4-45ec-ab1d-fdd704a6630b-kube-api-access-j8qr9\") pod \"keystone-db-create-w6s6w\" (UID: \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\") " pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.370826 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-1e0a-account-create-update-jft2t"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.392936 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5d4d5db777-x4428_0bc8176c-abeb-4ac1-90dc-d0c26939e6c3/console/0.log" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.393005 4900 generic.go:334] "Generic (PLEG): container finished" podID="0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" containerID="a126780cd6d5ae4eb121559d736cce47c465cf3ca3baff86838c544747a47a0e" exitCode=2 Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.393211 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5d4d5db777-x4428" event={"ID":"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3","Type":"ContainerDied","Data":"a126780cd6d5ae4eb121559d736cce47c465cf3ca3baff86838c544747a47a0e"} Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.435626 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqzcd\" (UniqueName: \"kubernetes.io/projected/31ce1b9b-164b-45ad-b989-27e535bbdb8b-kube-api-access-zqzcd\") pod \"keystone-1e0a-account-create-update-jft2t\" (UID: \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\") " pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.435716 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31ce1b9b-164b-45ad-b989-27e535bbdb8b-operator-scripts\") pod \"keystone-1e0a-account-create-update-jft2t\" (UID: \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\") " pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.538381 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31ce1b9b-164b-45ad-b989-27e535bbdb8b-operator-scripts\") pod \"keystone-1e0a-account-create-update-jft2t\" (UID: \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\") " pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.538771 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqzcd\" (UniqueName: \"kubernetes.io/projected/31ce1b9b-164b-45ad-b989-27e535bbdb8b-kube-api-access-zqzcd\") pod \"keystone-1e0a-account-create-update-jft2t\" (UID: \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\") " pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.539612 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/31ce1b9b-164b-45ad-b989-27e535bbdb8b-operator-scripts\") pod \"keystone-1e0a-account-create-update-jft2t\" (UID: \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\") " pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.542223 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-29z9h"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.543962 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-29z9h" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.550007 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.559788 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-29z9h"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.568652 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqzcd\" (UniqueName: \"kubernetes.io/projected/31ce1b9b-164b-45ad-b989-27e535bbdb8b-kube-api-access-zqzcd\") pod \"keystone-1e0a-account-create-update-jft2t\" (UID: \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\") " pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.661932 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5ade-account-create-update-drsd8"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.663676 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.670412 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.673825 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5ade-account-create-update-drsd8"] Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.732455 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.746749 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw98f\" (UniqueName: \"kubernetes.io/projected/ee07a3a8-51d8-498e-ace9-c4ea774065fd-kube-api-access-gw98f\") pod \"placement-db-create-29z9h\" (UID: \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\") " pod="openstack/placement-db-create-29z9h" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.746906 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee07a3a8-51d8-498e-ace9-c4ea774065fd-operator-scripts\") pod \"placement-db-create-29z9h\" (UID: \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\") " pod="openstack/placement-db-create-29z9h" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.747158 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-operator-scripts\") pod \"placement-5ade-account-create-update-drsd8\" (UID: \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\") " pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.747541 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z8bl\" (UniqueName: \"kubernetes.io/projected/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-kube-api-access-4z8bl\") pod \"placement-5ade-account-create-update-drsd8\" (UID: \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\") " pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.850882 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw98f\" (UniqueName: \"kubernetes.io/projected/ee07a3a8-51d8-498e-ace9-c4ea774065fd-kube-api-access-gw98f\") pod \"placement-db-create-29z9h\" (UID: \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\") " pod="openstack/placement-db-create-29z9h" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.851018 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee07a3a8-51d8-498e-ace9-c4ea774065fd-operator-scripts\") pod \"placement-db-create-29z9h\" (UID: \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\") " pod="openstack/placement-db-create-29z9h" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.851217 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-operator-scripts\") pod \"placement-5ade-account-create-update-drsd8\" (UID: \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\") " pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.851288 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z8bl\" (UniqueName: \"kubernetes.io/projected/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-kube-api-access-4z8bl\") pod \"placement-5ade-account-create-update-drsd8\" (UID: \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\") " pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.852192 4900 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee07a3a8-51d8-498e-ace9-c4ea774065fd-operator-scripts\") pod \"placement-db-create-29z9h\" (UID: \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\") " pod="openstack/placement-db-create-29z9h" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.852385 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-operator-scripts\") pod \"placement-5ade-account-create-update-drsd8\" (UID: \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\") " pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.869712 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z8bl\" (UniqueName: \"kubernetes.io/projected/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-kube-api-access-4z8bl\") pod \"placement-5ade-account-create-update-drsd8\" (UID: \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\") " pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.870266 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw98f\" (UniqueName: \"kubernetes.io/projected/ee07a3a8-51d8-498e-ace9-c4ea774065fd-kube-api-access-gw98f\") pod \"placement-db-create-29z9h\" (UID: \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\") " pod="openstack/placement-db-create-29z9h" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.914717 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-29z9h" Jan 27 12:49:43 crc kubenswrapper[4900]: I0127 12:49:43.994878 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:44 crc kubenswrapper[4900]: I0127 12:49:44.416593 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b5p78" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" containerID="cri-o://cbb57461ade85b8e9900dfb37df333dc4636221e9c9ccb3353807fb9047e2891" gracePeriod=2 Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.078313 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:45 crc kubenswrapper[4900]: E0127 12:49:45.078745 4900 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 12:49:45 crc kubenswrapper[4900]: E0127 12:49:45.078779 4900 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 12:49:45 crc kubenswrapper[4900]: E0127 12:49:45.078855 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift podName:0c2f90a4-baa0-4eeb-a797-3664c306818b nodeName:}" failed. No retries permitted until 2026-01-27 12:49:53.078827365 +0000 UTC m=+1420.315855575 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift") pod "swift-storage-0" (UID: "0c2f90a4-baa0-4eeb-a797-3664c306818b") : configmap "swift-ring-files" not found Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.311958 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.315878 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.404145 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14e50e4-6c8c-4eac-af5d-3726019acdc9-operator-scripts\") pod \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\" (UID: \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\") " Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.404242 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2xlc\" (UniqueName: \"kubernetes.io/projected/d14e50e4-6c8c-4eac-af5d-3726019acdc9-kube-api-access-n2xlc\") pod \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\" (UID: \"d14e50e4-6c8c-4eac-af5d-3726019acdc9\") " Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.404376 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa278afe-1535-4c32-af1f-840d8df9dbe5-operator-scripts\") pod \"aa278afe-1535-4c32-af1f-840d8df9dbe5\" (UID: \"aa278afe-1535-4c32-af1f-840d8df9dbe5\") " Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.404521 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vp79\" (UniqueName: \"kubernetes.io/projected/aa278afe-1535-4c32-af1f-840d8df9dbe5-kube-api-access-5vp79\") pod \"aa278afe-1535-4c32-af1f-840d8df9dbe5\" (UID: \"aa278afe-1535-4c32-af1f-840d8df9dbe5\") " Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.405052 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d14e50e4-6c8c-4eac-af5d-3726019acdc9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d14e50e4-6c8c-4eac-af5d-3726019acdc9" (UID: "d14e50e4-6c8c-4eac-af5d-3726019acdc9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.405445 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d14e50e4-6c8c-4eac-af5d-3726019acdc9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.409168 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d14e50e4-6c8c-4eac-af5d-3726019acdc9-kube-api-access-n2xlc" (OuterVolumeSpecName: "kube-api-access-n2xlc") pod "d14e50e4-6c8c-4eac-af5d-3726019acdc9" (UID: "d14e50e4-6c8c-4eac-af5d-3726019acdc9"). InnerVolumeSpecName "kube-api-access-n2xlc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.414418 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa278afe-1535-4c32-af1f-840d8df9dbe5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aa278afe-1535-4c32-af1f-840d8df9dbe5" (UID: "aa278afe-1535-4c32-af1f-840d8df9dbe5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.418157 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa278afe-1535-4c32-af1f-840d8df9dbe5-kube-api-access-5vp79" (OuterVolumeSpecName: "kube-api-access-5vp79") pod "aa278afe-1535-4c32-af1f-840d8df9dbe5" (UID: "aa278afe-1535-4c32-af1f-840d8df9dbe5"). InnerVolumeSpecName "kube-api-access-5vp79". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.479776 4900 generic.go:334] "Generic (PLEG): container finished" podID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerID="cbb57461ade85b8e9900dfb37df333dc4636221e9c9ccb3353807fb9047e2891" exitCode=0 Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.479873 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5p78" event={"ID":"1344dba3-f0d4-468c-ad6b-1653648f6017","Type":"ContainerDied","Data":"cbb57461ade85b8e9900dfb37df333dc4636221e9c9ccb3353807fb9047e2891"} Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.483656 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" event={"ID":"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a","Type":"ContainerDied","Data":"7b16b8a1604759c0d13e20a01ccbe3eeb3c8dae9c0b1e0ea584db50804ef643b"} Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.483729 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b16b8a1604759c0d13e20a01ccbe3eeb3c8dae9c0b1e0ea584db50804ef643b" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.486172 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" event={"ID":"d14e50e4-6c8c-4eac-af5d-3726019acdc9","Type":"ContainerDied","Data":"d6b94a2b8aec292bdfce17d45609e8c709df6ff8422ef74fab902a4cbbf6fd54"} Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.486372 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d6b94a2b8aec292bdfce17d45609e8c709df6ff8422ef74fab902a4cbbf6fd54" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.486575 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-22af-account-create-update-5nw2h" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.494275 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-7e73-account-create-update-6fnzb" event={"ID":"02763fa3-4db9-4750-ba18-7a434e9cd831","Type":"ContainerStarted","Data":"e482cdf7d7d06f7633b618c05b85d14faba039138e0126ad8965d09c3289b8cc"} Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.503394 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2tkfw" event={"ID":"aa278afe-1535-4c32-af1f-840d8df9dbe5","Type":"ContainerDied","Data":"6983eed52d7957e198c9ee3591a13fc7475eae8ec34bcbfc473b9279be24ad5f"} Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.503428 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6983eed52d7957e198c9ee3591a13fc7475eae8ec34bcbfc473b9279be24ad5f" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.503517 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2tkfw" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.508561 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa278afe-1535-4c32-af1f-840d8df9dbe5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.508599 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vp79\" (UniqueName: \"kubernetes.io/projected/aa278afe-1535-4c32-af1f-840d8df9dbe5-kube-api-access-5vp79\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.508609 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2xlc\" (UniqueName: \"kubernetes.io/projected/d14e50e4-6c8c-4eac-af5d-3726019acdc9-kube-api-access-n2xlc\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.540569 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.609737 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77dvk\" (UniqueName: \"kubernetes.io/projected/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-kube-api-access-77dvk\") pod \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\" (UID: \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\") " Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.609805 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-operator-scripts\") pod \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\" (UID: \"f2794095-3d03-4cf0-8e7b-ecc39fb3db7a\") " Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.610361 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f2794095-3d03-4cf0-8e7b-ecc39fb3db7a" (UID: "f2794095-3d03-4cf0-8e7b-ecc39fb3db7a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.611006 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:45 crc kubenswrapper[4900]: I0127 12:49:45.616533 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-kube-api-access-77dvk" (OuterVolumeSpecName: "kube-api-access-77dvk") pod "f2794095-3d03-4cf0-8e7b-ecc39fb3db7a" (UID: "f2794095-3d03-4cf0-8e7b-ecc39fb3db7a"). InnerVolumeSpecName "kube-api-access-77dvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:46 crc kubenswrapper[4900]: E0127 12:49:45.707555 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd14e50e4_6c8c_4eac_af5d_3726019acdc9.slice/crio-d6b94a2b8aec292bdfce17d45609e8c709df6ff8422ef74fab902a4cbbf6fd54\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa278afe_1535_4c32_af1f_840d8df9dbe5.slice/crio-6983eed52d7957e198c9ee3591a13fc7475eae8ec34bcbfc473b9279be24ad5f\": RecentStats: unable to find data in memory cache]" Jan 27 12:49:46 crc kubenswrapper[4900]: I0127 12:49:45.712666 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77dvk\" (UniqueName: \"kubernetes.io/projected/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a-kube-api-access-77dvk\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:46 crc kubenswrapper[4900]: I0127 12:49:46.972926 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-hsfkh" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.322492 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5p78" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.492576 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-utilities\") pod \"1344dba3-f0d4-468c-ad6b-1653648f6017\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.492737 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xrfq\" (UniqueName: \"kubernetes.io/projected/1344dba3-f0d4-468c-ad6b-1653648f6017-kube-api-access-6xrfq\") pod \"1344dba3-f0d4-468c-ad6b-1653648f6017\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.492831 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-catalog-content\") pod \"1344dba3-f0d4-468c-ad6b-1653648f6017\" (UID: \"1344dba3-f0d4-468c-ad6b-1653648f6017\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.494254 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-utilities" (OuterVolumeSpecName: "utilities") pod "1344dba3-f0d4-468c-ad6b-1653648f6017" (UID: "1344dba3-f0d4-468c-ad6b-1653648f6017"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.560360 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1344dba3-f0d4-468c-ad6b-1653648f6017-kube-api-access-6xrfq" (OuterVolumeSpecName: "kube-api-access-6xrfq") pod "1344dba3-f0d4-468c-ad6b-1653648f6017" (UID: "1344dba3-f0d4-468c-ad6b-1653648f6017"). InnerVolumeSpecName "kube-api-access-6xrfq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.599570 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xrfq\" (UniqueName: \"kubernetes.io/projected/1344dba3-f0d4-468c-ad6b-1653648f6017-kube-api-access-6xrfq\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.599635 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.655119 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1344dba3-f0d4-468c-ad6b-1653648f6017" (UID: "1344dba3-f0d4-468c-ad6b-1653648f6017"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.686900 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5d4d5db777-x4428_0bc8176c-abeb-4ac1-90dc-d0c26939e6c3/console/0.log" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.686985 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.702379 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1344dba3-f0d4-468c-ad6b-1653648f6017-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.803979 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-oauth-serving-cert\") pod \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.804431 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-trusted-ca-bundle\") pod \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.804481 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-serving-cert\") pod \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.805363 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" (UID: "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.805496 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" (UID: "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.805794 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkf8q\" (UniqueName: \"kubernetes.io/projected/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-kube-api-access-lkf8q\") pod \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.805887 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-oauth-config\") pod \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.805950 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-config\") pod \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.806160 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-service-ca\") pod \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\" (UID: \"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3\") " Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.806938 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-config" (OuterVolumeSpecName: "console-config") pod "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" (UID: "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.806962 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" (UID: "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.807740 4900 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.807764 4900 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.807793 4900 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.807805 4900 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.818959 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" (UID: "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.823964 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-kube-api-access-lkf8q" (OuterVolumeSpecName: "kube-api-access-lkf8q") pod "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" (UID: "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3"). InnerVolumeSpecName "kube-api-access-lkf8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.824595 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" (UID: "0bc8176c-abeb-4ac1-90dc-d0c26939e6c3"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.910650 4900 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.910706 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkf8q\" (UniqueName: \"kubernetes.io/projected/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-kube-api-access-lkf8q\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:47 crc kubenswrapper[4900]: I0127 12:49:47.910726 4900 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.010969 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4","Type":"ContainerStarted","Data":"d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.025696 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ca415683-2d53-4bdc-b9f7-c98610a65cc3","Type":"ContainerStarted","Data":"60f7d34b6f754639110844ff04c74d1c3d674caa3b8cc7ec2a926c024f9d5de9"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.026051 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.030545 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-z8pb8" event={"ID":"67c4919e-b3dc-47ea-8728-03d0aaf07c18","Type":"ContainerStarted","Data":"bd25891da5f2894b1dc28a7be89eaefdea2527622d3e2a18e60d2e172cbac996"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.031372 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.049319 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5d4d5db777-x4428_0bc8176c-abeb-4ac1-90dc-d0c26939e6c3/console/0.log" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.049548 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5d4d5db777-x4428" event={"ID":"0bc8176c-abeb-4ac1-90dc-d0c26939e6c3","Type":"ContainerDied","Data":"1456d6b33ccd6b32c741a3317e11f6175ce550a21e7b5db9219f842b3cf19051"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.049622 4900 scope.go:117] "RemoveContainer" containerID="a126780cd6d5ae4eb121559d736cce47c465cf3ca3baff86838c544747a47a0e" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.049673 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5d4d5db777-x4428" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.075229 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerStarted","Data":"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.087010 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5p78" event={"ID":"1344dba3-f0d4-468c-ad6b-1653648f6017","Type":"ContainerDied","Data":"645fdfbcce36732996306a98206ec33247b5fc1ef3fd2b32ad2b8239c2cae71b"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.087222 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5p78" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.111349 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=53.011843126 podStartE2EDuration="1m9.111323507s" podCreationTimestamp="2026-01-27 12:48:39 +0000 UTC" firstStartedPulling="2026-01-27 12:48:49.941501839 +0000 UTC m=+1357.178530049" lastFinishedPulling="2026-01-27 12:49:06.04098222 +0000 UTC m=+1373.278010430" observedRunningTime="2026-01-27 12:49:48.095008731 +0000 UTC m=+1415.332036951" watchObservedRunningTime="2026-01-27 12:49:48.111323507 +0000 UTC m=+1415.348351717" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.141156 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c4594c71-599f-4576-bf95-303da1436ca4","Type":"ContainerStarted","Data":"71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.142398 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.166426 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-7e73-account-create-update-6fnzb" event={"ID":"02763fa3-4db9-4750-ba18-7a434e9cd831","Type":"ContainerStarted","Data":"7b373ed0c02d36b7ba775a9be8852008006162a5ea5e017f61db65917394590e"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.169504 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-z8pb8" podStartSLOduration=13.169471262 podStartE2EDuration="13.169471262s" podCreationTimestamp="2026-01-27 12:49:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:48.155424163 +0000 UTC m=+1415.392452373" watchObservedRunningTime="2026-01-27 12:49:48.169471262 +0000 UTC m=+1415.406499472" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.173965 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5","Type":"ContainerStarted","Data":"05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.174982 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.183091 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"c76c04d4-a881-4504-a00f-3b227187edfa","Type":"ContainerStarted","Data":"2f57c7fc622f89c1fde5fd7157fa5ecec74927af707891651e0c7947e1e497f7"} Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.205456 4900 scope.go:117] "RemoveContainer" containerID="cbb57461ade85b8e9900dfb37df333dc4636221e9c9ccb3353807fb9047e2891" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.220766 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=53.341284841 podStartE2EDuration="1m9.220739907s" podCreationTimestamp="2026-01-27 12:48:39 +0000 UTC" firstStartedPulling="2026-01-27 12:48:49.918616232 +0000 UTC m=+1357.155644442" lastFinishedPulling="2026-01-27 12:49:05.798071298 +0000 UTC m=+1373.035099508" observedRunningTime="2026-01-27 12:49:48.218526482 +0000 UTC m=+1415.455554712" watchObservedRunningTime="2026-01-27 12:49:48.220739907 +0000 UTC m=+1415.457768117" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.240327 4900 scope.go:117] "RemoveContainer" containerID="2e8fdd581fcba7b2893113318b5cdf7cd75b33f2584d345e8a2dd922865e5ba5" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.268118 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-7e73-account-create-update-6fnzb" podStartSLOduration=9.268081817 podStartE2EDuration="9.268081817s" podCreationTimestamp="2026-01-27 12:49:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:48.245666983 +0000 UTC m=+1415.482695193" watchObservedRunningTime="2026-01-27 12:49:48.268081817 +0000 UTC m=+1415.505110027" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.273466 4900 scope.go:117] "RemoveContainer" containerID="780e66f5685639a9c3fcdf06e1e97c102d2ca0d6a15d6ea21db96a2e31d9a197" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.301213 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5d4d5db777-x4428"] Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.314266 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-5d4d5db777-x4428"] Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.318277 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=17.210312255 podStartE2EDuration="1m0.318253889s" podCreationTimestamp="2026-01-27 12:48:48 +0000 UTC" firstStartedPulling="2026-01-27 12:49:04.014981729 +0000 UTC m=+1371.252009939" lastFinishedPulling="2026-01-27 12:49:47.122923363 +0000 UTC m=+1414.359951573" observedRunningTime="2026-01-27 12:49:48.302215392 +0000 UTC m=+1415.539243612" watchObservedRunningTime="2026-01-27 12:49:48.318253889 +0000 UTC m=+1415.555282099" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.348936 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=53.222899889 podStartE2EDuration="1m9.348903403s" podCreationTimestamp="2026-01-27 12:48:39 +0000 UTC" firstStartedPulling="2026-01-27 12:48:49.94118973 +0000 UTC m=+1357.178217940" lastFinishedPulling="2026-01-27 12:49:06.067193244 +0000 UTC m=+1373.304221454" observedRunningTime="2026-01-27 12:49:48.342812425 +0000 UTC m=+1415.579840635" watchObservedRunningTime="2026-01-27 12:49:48.348903403 +0000 UTC m=+1415.585931623" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.388795 4900 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5p78"] Jan 27 12:49:48 crc kubenswrapper[4900]: W0127 12:49:48.543685 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90bf0fe6_a714_419c_ab4a_46d0ece94652.slice/crio-d23d15a85a36e2ed4db86f1fc8f5f545ea8e4214d41567cce38a38f1ef9f5044 WatchSource:0}: Error finding container d23d15a85a36e2ed4db86f1fc8f5f545ea8e4214d41567cce38a38f1ef9f5044: Status 404 returned error can't find the container with id d23d15a85a36e2ed4db86f1fc8f5f545ea8e4214d41567cce38a38f1ef9f5044 Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.548043 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=53.667498398 podStartE2EDuration="1m9.548010986s" podCreationTimestamp="2026-01-27 12:48:39 +0000 UTC" firstStartedPulling="2026-01-27 12:48:49.929642013 +0000 UTC m=+1357.166670223" lastFinishedPulling="2026-01-27 12:49:05.810154601 +0000 UTC m=+1373.047182811" observedRunningTime="2026-01-27 12:49:48.413587187 +0000 UTC m=+1415.650615397" watchObservedRunningTime="2026-01-27 12:49:48.548010986 +0000 UTC m=+1415.785039196" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.579221 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" path="/var/lib/kubelet/pods/0bc8176c-abeb-4ac1-90dc-d0c26939e6c3/volumes" Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.579970 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b5p78"] Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.580014 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-1e0a-account-create-update-jft2t"] Jan 27 12:49:48 crc kubenswrapper[4900]: W0127 12:49:48.583723 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod261d14b0_20d4_45ec_ab1d_fdd704a6630b.slice/crio-8dfb905e6821d55b1aa2ce9f15eefe6866418ae0eb0189e18d4eda2fafcb68e4 WatchSource:0}: Error finding container 8dfb905e6821d55b1aa2ce9f15eefe6866418ae0eb0189e18d4eda2fafcb68e4: Status 404 returned error can't find the container with id 8dfb905e6821d55b1aa2ce9f15eefe6866418ae0eb0189e18d4eda2fafcb68e4 Jan 27 12:49:48 crc kubenswrapper[4900]: W0127 12:49:48.584815 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33ba5ed7_2f73_4fda_94cb_568e6a8c9843.slice/crio-2b366d4b93c1021fd19a833a5b7bff21d79d4fbc41d8462f473f73ef40e768c7 WatchSource:0}: Error finding container 2b366d4b93c1021fd19a833a5b7bff21d79d4fbc41d8462f473f73ef40e768c7: Status 404 returned error can't find the container with id 2b366d4b93c1021fd19a833a5b7bff21d79d4fbc41d8462f473f73ef40e768c7 Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.587489 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5ade-account-create-update-drsd8"] Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.598175 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-29z9h"] Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.607185 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-w6s6w"] Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.616481 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/swift-ring-rebalance-dzlzf"] Jan 27 12:49:48 crc kubenswrapper[4900]: I0127 12:49:48.656773 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-qtmpr"] Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.193202 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-qtmpr" event={"ID":"90bf0fe6-a714-419c-ab4a-46d0ece94652","Type":"ContainerStarted","Data":"d23d15a85a36e2ed4db86f1fc8f5f545ea8e4214d41567cce38a38f1ef9f5044"} Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.197383 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ade-account-create-update-drsd8" event={"ID":"c79b5e05-afb6-4a73-90d2-32beab5ba2d3","Type":"ContainerStarted","Data":"ce06fab8e02c6ca61074b23dece3fe073c03f7fcae45206a66c656a18631a58d"} Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.199515 4900 generic.go:334] "Generic (PLEG): container finished" podID="02763fa3-4db9-4750-ba18-7a434e9cd831" containerID="7b373ed0c02d36b7ba775a9be8852008006162a5ea5e017f61db65917394590e" exitCode=0 Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.199598 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-7e73-account-create-update-6fnzb" event={"ID":"02763fa3-4db9-4750-ba18-7a434e9cd831","Type":"ContainerDied","Data":"7b373ed0c02d36b7ba775a9be8852008006162a5ea5e017f61db65917394590e"} Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.201497 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dzlzf" event={"ID":"33ba5ed7-2f73-4fda-94cb-568e6a8c9843","Type":"ContainerStarted","Data":"2b366d4b93c1021fd19a833a5b7bff21d79d4fbc41d8462f473f73ef40e768c7"} Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.204699 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-29z9h" event={"ID":"ee07a3a8-51d8-498e-ace9-c4ea774065fd","Type":"ContainerStarted","Data":"3915a9c14a157e67dc48784dc4e583b36ea9d69ecba47475cce284281a64e9e4"} Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.207492 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-w6s6w" event={"ID":"261d14b0-20d4-45ec-ab1d-fdd704a6630b","Type":"ContainerStarted","Data":"8dfb905e6821d55b1aa2ce9f15eefe6866418ae0eb0189e18d4eda2fafcb68e4"} Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.209393 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1e0a-account-create-update-jft2t" event={"ID":"31ce1b9b-164b-45ad-b989-27e535bbdb8b","Type":"ContainerStarted","Data":"66fddc6f9867ae744051c06d1f664d9a3c959402e0b84c0676616f0b61c4b56d"} Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.209448 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1e0a-account-create-update-jft2t" event={"ID":"31ce1b9b-164b-45ad-b989-27e535bbdb8b","Type":"ContainerStarted","Data":"3ac693d924ccd40b3acaa05ac01354a4ce4705683cb185946615d60efff8f501"} Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.752919 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 27 12:49:49 crc kubenswrapper[4900]: I0127 12:49:49.753253 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.221960 4900 generic.go:334] "Generic (PLEG): container finished" podID="c79b5e05-afb6-4a73-90d2-32beab5ba2d3" 
containerID="35ee5d2241fe7a1fe71203b8b74c11d5b957bc15fb75a7bfada360530e79b364" exitCode=0 Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.222049 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ade-account-create-update-drsd8" event={"ID":"c79b5e05-afb6-4a73-90d2-32beab5ba2d3","Type":"ContainerDied","Data":"35ee5d2241fe7a1fe71203b8b74c11d5b957bc15fb75a7bfada360530e79b364"} Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.224444 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-29z9h" event={"ID":"ee07a3a8-51d8-498e-ace9-c4ea774065fd","Type":"ContainerStarted","Data":"dcc580548641865be2c637dfeec0215aea33ac37c34b70abfac8e1a5efd980d2"} Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.229033 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-w6s6w" event={"ID":"261d14b0-20d4-45ec-ab1d-fdd704a6630b","Type":"ContainerStarted","Data":"e6619e8476cd8fbe3647dd264d54c98dda8cdc5dc296d0fc5bfb11a469a47c86"} Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.232257 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-qtmpr" event={"ID":"90bf0fe6-a714-419c-ab4a-46d0ece94652","Type":"ContainerStarted","Data":"9eee290fb95365149a8fe071c30f618e894e87ff3fbd0514771216a845c4333a"} Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.260747 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-1e0a-account-create-update-jft2t" podStartSLOduration=7.260717834 podStartE2EDuration="7.260717834s" podCreationTimestamp="2026-01-27 12:49:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:49.26062301 +0000 UTC m=+1416.497651220" watchObservedRunningTime="2026-01-27 12:49:50.260717834 +0000 UTC m=+1417.497746044" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.283207 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-29z9h" podStartSLOduration=7.283177049 podStartE2EDuration="7.283177049s" podCreationTimestamp="2026-01-27 12:49:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:50.273706113 +0000 UTC m=+1417.510734333" watchObservedRunningTime="2026-01-27 12:49:50.283177049 +0000 UTC m=+1417.520205259" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.303884 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-w6s6w" podStartSLOduration=7.303851782 podStartE2EDuration="7.303851782s" podCreationTimestamp="2026-01-27 12:49:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:50.300236896 +0000 UTC m=+1417.537265116" watchObservedRunningTime="2026-01-27 12:49:50.303851782 +0000 UTC m=+1417.540879992" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.529278 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" path="/var/lib/kubelet/pods/1344dba3-f0d4-468c-ad6b-1653648f6017/volumes" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.865134 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.914342 
4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.918734 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-qtmpr" podStartSLOduration=10.918691655 podStartE2EDuration="10.918691655s" podCreationTimestamp="2026-01-27 12:49:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:49:50.332817166 +0000 UTC m=+1417.569845376" watchObservedRunningTime="2026-01-27 12:49:50.918691655 +0000 UTC m=+1418.155719865" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928172 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg"] Jan 27 12:49:50 crc kubenswrapper[4900]: E0127 12:49:50.928759 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928797 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" Jan 27 12:49:50 crc kubenswrapper[4900]: E0127 12:49:50.928820 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa278afe-1535-4c32-af1f-840d8df9dbe5" containerName="mariadb-database-create" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928831 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa278afe-1535-4c32-af1f-840d8df9dbe5" containerName="mariadb-database-create" Jan 27 12:49:50 crc kubenswrapper[4900]: E0127 12:49:50.928846 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2794095-3d03-4cf0-8e7b-ecc39fb3db7a" containerName="mariadb-database-create" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928853 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2794095-3d03-4cf0-8e7b-ecc39fb3db7a" containerName="mariadb-database-create" Jan 27 12:49:50 crc kubenswrapper[4900]: E0127 12:49:50.928873 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02763fa3-4db9-4750-ba18-7a434e9cd831" containerName="mariadb-account-create-update" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928881 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="02763fa3-4db9-4750-ba18-7a434e9cd831" containerName="mariadb-account-create-update" Jan 27 12:49:50 crc kubenswrapper[4900]: E0127 12:49:50.928899 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="extract-utilities" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928907 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="extract-utilities" Jan 27 12:49:50 crc kubenswrapper[4900]: E0127 12:49:50.928924 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d14e50e4-6c8c-4eac-af5d-3726019acdc9" containerName="mariadb-account-create-update" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928931 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d14e50e4-6c8c-4eac-af5d-3726019acdc9" containerName="mariadb-account-create-update" Jan 27 12:49:50 crc kubenswrapper[4900]: E0127 12:49:50.928948 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" 
containerName="extract-content" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928958 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="extract-content" Jan 27 12:49:50 crc kubenswrapper[4900]: E0127 12:49:50.928970 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" containerName="console" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.928977 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" containerName="console" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.929281 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa278afe-1535-4c32-af1f-840d8df9dbe5" containerName="mariadb-database-create" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.929305 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="02763fa3-4db9-4750-ba18-7a434e9cd831" containerName="mariadb-account-create-update" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.929316 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="1344dba3-f0d4-468c-ad6b-1653648f6017" containerName="registry-server" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.929338 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2794095-3d03-4cf0-8e7b-ecc39fb3db7a" containerName="mariadb-database-create" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.929350 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bc8176c-abeb-4ac1-90dc-d0c26939e6c3" containerName="console" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.929357 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d14e50e4-6c8c-4eac-af5d-3726019acdc9" containerName="mariadb-account-create-update" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.930478 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" Jan 27 12:49:50 crc kubenswrapper[4900]: I0127 12:49:50.937611 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg"] Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.033743 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02763fa3-4db9-4750-ba18-7a434e9cd831-operator-scripts\") pod \"02763fa3-4db9-4750-ba18-7a434e9cd831\" (UID: \"02763fa3-4db9-4750-ba18-7a434e9cd831\") " Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.034157 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pm87p\" (UniqueName: \"kubernetes.io/projected/02763fa3-4db9-4750-ba18-7a434e9cd831-kube-api-access-pm87p\") pod \"02763fa3-4db9-4750-ba18-7a434e9cd831\" (UID: \"02763fa3-4db9-4750-ba18-7a434e9cd831\") " Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.034713 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgjl8\" (UniqueName: \"kubernetes.io/projected/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-kube-api-access-qgjl8\") pod \"mysqld-exporter-openstack-cell1-db-create-wcmjg\" (UID: \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.034829 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-wcmjg\" (UID: \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.037791 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02763fa3-4db9-4750-ba18-7a434e9cd831-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "02763fa3-4db9-4750-ba18-7a434e9cd831" (UID: "02763fa3-4db9-4750-ba18-7a434e9cd831"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.085770 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02763fa3-4db9-4750-ba18-7a434e9cd831-kube-api-access-pm87p" (OuterVolumeSpecName: "kube-api-access-pm87p") pod "02763fa3-4db9-4750-ba18-7a434e9cd831" (UID: "02763fa3-4db9-4750-ba18-7a434e9cd831"). InnerVolumeSpecName "kube-api-access-pm87p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.136769 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgjl8\" (UniqueName: \"kubernetes.io/projected/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-kube-api-access-qgjl8\") pod \"mysqld-exporter-openstack-cell1-db-create-wcmjg\" (UID: \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.137144 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-wcmjg\" (UID: \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.137259 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/02763fa3-4db9-4750-ba18-7a434e9cd831-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.137278 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pm87p\" (UniqueName: \"kubernetes.io/projected/02763fa3-4db9-4750-ba18-7a434e9cd831-kube-api-access-pm87p\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.138373 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-wcmjg\" (UID: \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.208665 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgjl8\" (UniqueName: \"kubernetes.io/projected/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-kube-api-access-qgjl8\") pod \"mysqld-exporter-openstack-cell1-db-create-wcmjg\" (UID: \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.238555 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-accf-account-create-update-s6gmw"] Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.240504 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.243860 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.254112 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-accf-account-create-update-s6gmw"] Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.259839 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1e0a-account-create-update-jft2t" event={"ID":"31ce1b9b-164b-45ad-b989-27e535bbdb8b","Type":"ContainerDied","Data":"66fddc6f9867ae744051c06d1f664d9a3c959402e0b84c0676616f0b61c4b56d"} Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.256686 4900 generic.go:334] "Generic (PLEG): container finished" podID="31ce1b9b-164b-45ad-b989-27e535bbdb8b" containerID="66fddc6f9867ae744051c06d1f664d9a3c959402e0b84c0676616f0b61c4b56d" exitCode=0 Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.254453 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.349364 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerStarted","Data":"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"} Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.351503 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4qcn\" (UniqueName: \"kubernetes.io/projected/fd31604a-235d-497a-a5fb-00c2928a0954-kube-api-access-b4qcn\") pod \"mysqld-exporter-accf-account-create-update-s6gmw\" (UID: \"fd31604a-235d-497a-a5fb-00c2928a0954\") " pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.351609 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd31604a-235d-497a-a5fb-00c2928a0954-operator-scripts\") pod \"mysqld-exporter-accf-account-create-update-s6gmw\" (UID: \"fd31604a-235d-497a-a5fb-00c2928a0954\") " pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.358902 4900 generic.go:334] "Generic (PLEG): container finished" podID="ee07a3a8-51d8-498e-ace9-c4ea774065fd" containerID="dcc580548641865be2c637dfeec0215aea33ac37c34b70abfac8e1a5efd980d2" exitCode=0 Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.360224 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-29z9h" event={"ID":"ee07a3a8-51d8-498e-ace9-c4ea774065fd","Type":"ContainerDied","Data":"dcc580548641865be2c637dfeec0215aea33ac37c34b70abfac8e1a5efd980d2"} Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.363571 4900 generic.go:334] "Generic (PLEG): container finished" podID="261d14b0-20d4-45ec-ab1d-fdd704a6630b" containerID="e6619e8476cd8fbe3647dd264d54c98dda8cdc5dc296d0fc5bfb11a469a47c86" exitCode=0 Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.363625 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-w6s6w" 
event={"ID":"261d14b0-20d4-45ec-ab1d-fdd704a6630b","Type":"ContainerDied","Data":"e6619e8476cd8fbe3647dd264d54c98dda8cdc5dc296d0fc5bfb11a469a47c86"} Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.371732 4900 generic.go:334] "Generic (PLEG): container finished" podID="90bf0fe6-a714-419c-ab4a-46d0ece94652" containerID="9eee290fb95365149a8fe071c30f618e894e87ff3fbd0514771216a845c4333a" exitCode=0 Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.371806 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-qtmpr" event={"ID":"90bf0fe6-a714-419c-ab4a-46d0ece94652","Type":"ContainerDied","Data":"9eee290fb95365149a8fe071c30f618e894e87ff3fbd0514771216a845c4333a"} Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.375921 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-7e73-account-create-update-6fnzb" event={"ID":"02763fa3-4db9-4750-ba18-7a434e9cd831","Type":"ContainerDied","Data":"e482cdf7d7d06f7633b618c05b85d14faba039138e0126ad8965d09c3289b8cc"} Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.376045 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e482cdf7d7d06f7633b618c05b85d14faba039138e0126ad8965d09c3289b8cc" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.377693 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-7e73-account-create-update-6fnzb" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.453912 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4qcn\" (UniqueName: \"kubernetes.io/projected/fd31604a-235d-497a-a5fb-00c2928a0954-kube-api-access-b4qcn\") pod \"mysqld-exporter-accf-account-create-update-s6gmw\" (UID: \"fd31604a-235d-497a-a5fb-00c2928a0954\") " pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.454467 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd31604a-235d-497a-a5fb-00c2928a0954-operator-scripts\") pod \"mysqld-exporter-accf-account-create-update-s6gmw\" (UID: \"fd31604a-235d-497a-a5fb-00c2928a0954\") " pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.455340 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd31604a-235d-497a-a5fb-00c2928a0954-operator-scripts\") pod \"mysqld-exporter-accf-account-create-update-s6gmw\" (UID: \"fd31604a-235d-497a-a5fb-00c2928a0954\") " pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.488326 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4qcn\" (UniqueName: \"kubernetes.io/projected/fd31604a-235d-497a-a5fb-00c2928a0954-kube-api-access-b4qcn\") pod \"mysqld-exporter-accf-account-create-update-s6gmw\" (UID: \"fd31604a-235d-497a-a5fb-00c2928a0954\") " pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" Jan 27 12:49:51 crc kubenswrapper[4900]: I0127 12:49:51.640008 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" Jan 27 12:49:52 crc kubenswrapper[4900]: I0127 12:49:52.812384 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 27 12:49:52 crc kubenswrapper[4900]: I0127 12:49:52.886914 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.125097 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:49:53 crc kubenswrapper[4900]: E0127 12:49:53.125483 4900 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 27 12:49:53 crc kubenswrapper[4900]: E0127 12:49:53.125504 4900 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 27 12:49:53 crc kubenswrapper[4900]: E0127 12:49:53.125568 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift podName:0c2f90a4-baa0-4eeb-a797-3664c306818b nodeName:}" failed. No retries permitted until 2026-01-27 12:50:09.125544178 +0000 UTC m=+1436.362572388 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift") pod "swift-storage-0" (UID: "0c2f90a4-baa0-4eeb-a797-3664c306818b") : configmap "swift-ring-files" not found Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.483380 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.488968 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.492574 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.494110 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-c9t6s" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.494275 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.494468 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.520539 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.545700 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c5578d3a-7a63-42bb-bd05-499cc28ed723-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.545779 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.545846 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxgpk\" (UniqueName: \"kubernetes.io/projected/c5578d3a-7a63-42bb-bd05-499cc28ed723-kube-api-access-nxgpk\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.545863 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.549956 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.550042 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5578d3a-7a63-42bb-bd05-499cc28ed723-config\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.550182 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5578d3a-7a63-42bb-bd05-499cc28ed723-scripts\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: 
I0127 12:49:53.657518 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c5578d3a-7a63-42bb-bd05-499cc28ed723-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.658478 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c5578d3a-7a63-42bb-bd05-499cc28ed723-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.658686 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.658906 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxgpk\" (UniqueName: \"kubernetes.io/projected/c5578d3a-7a63-42bb-bd05-499cc28ed723-kube-api-access-nxgpk\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.658942 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.659203 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.659272 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5578d3a-7a63-42bb-bd05-499cc28ed723-config\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.659406 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5578d3a-7a63-42bb-bd05-499cc28ed723-scripts\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.660519 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5578d3a-7a63-42bb-bd05-499cc28ed723-scripts\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.665365 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5578d3a-7a63-42bb-bd05-499cc28ed723-config\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.672024 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.673423 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.683930 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5578d3a-7a63-42bb-bd05-499cc28ed723-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.717084 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxgpk\" (UniqueName: \"kubernetes.io/projected/c5578d3a-7a63-42bb-bd05-499cc28ed723-kube-api-access-nxgpk\") pod \"ovn-northd-0\" (UID: \"c5578d3a-7a63-42bb-bd05-499cc28ed723\") " pod="openstack/ovn-northd-0" Jan 27 12:49:53 crc kubenswrapper[4900]: I0127 12:49:53.811262 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.162874 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.181252 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.183346 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.199469 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-29z9h" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.219307 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.281713 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4z8bl\" (UniqueName: \"kubernetes.io/projected/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-kube-api-access-4z8bl\") pod \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\" (UID: \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292190 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqzcd\" (UniqueName: \"kubernetes.io/projected/31ce1b9b-164b-45ad-b989-27e535bbdb8b-kube-api-access-zqzcd\") pod \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\" (UID: \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292316 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8qr9\" (UniqueName: \"kubernetes.io/projected/261d14b0-20d4-45ec-ab1d-fdd704a6630b-kube-api-access-j8qr9\") pod \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\" (UID: \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292445 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82wc7\" (UniqueName: \"kubernetes.io/projected/90bf0fe6-a714-419c-ab4a-46d0ece94652-kube-api-access-82wc7\") pod \"90bf0fe6-a714-419c-ab4a-46d0ece94652\" (UID: \"90bf0fe6-a714-419c-ab4a-46d0ece94652\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292522 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/261d14b0-20d4-45ec-ab1d-fdd704a6630b-operator-scripts\") pod \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\" (UID: \"261d14b0-20d4-45ec-ab1d-fdd704a6630b\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292542 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31ce1b9b-164b-45ad-b989-27e535bbdb8b-operator-scripts\") pod \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\" (UID: \"31ce1b9b-164b-45ad-b989-27e535bbdb8b\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292567 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-operator-scripts\") pod \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\" (UID: \"c79b5e05-afb6-4a73-90d2-32beab5ba2d3\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292607 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90bf0fe6-a714-419c-ab4a-46d0ece94652-operator-scripts\") pod \"90bf0fe6-a714-419c-ab4a-46d0ece94652\" (UID: \"90bf0fe6-a714-419c-ab4a-46d0ece94652\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292641 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee07a3a8-51d8-498e-ace9-c4ea774065fd-operator-scripts\") pod \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\" (UID: \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.292661 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw98f\" (UniqueName: 
\"kubernetes.io/projected/ee07a3a8-51d8-498e-ace9-c4ea774065fd-kube-api-access-gw98f\") pod \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\" (UID: \"ee07a3a8-51d8-498e-ace9-c4ea774065fd\") " Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.289410 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-kube-api-access-4z8bl" (OuterVolumeSpecName: "kube-api-access-4z8bl") pod "c79b5e05-afb6-4a73-90d2-32beab5ba2d3" (UID: "c79b5e05-afb6-4a73-90d2-32beab5ba2d3"). InnerVolumeSpecName "kube-api-access-4z8bl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.294430 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31ce1b9b-164b-45ad-b989-27e535bbdb8b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "31ce1b9b-164b-45ad-b989-27e535bbdb8b" (UID: "31ce1b9b-164b-45ad-b989-27e535bbdb8b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.296214 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c79b5e05-afb6-4a73-90d2-32beab5ba2d3" (UID: "c79b5e05-afb6-4a73-90d2-32beab5ba2d3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.296626 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90bf0fe6-a714-419c-ab4a-46d0ece94652-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "90bf0fe6-a714-419c-ab4a-46d0ece94652" (UID: "90bf0fe6-a714-419c-ab4a-46d0ece94652"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.297088 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee07a3a8-51d8-498e-ace9-c4ea774065fd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ee07a3a8-51d8-498e-ace9-c4ea774065fd" (UID: "ee07a3a8-51d8-498e-ace9-c4ea774065fd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.297639 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/261d14b0-20d4-45ec-ab1d-fdd704a6630b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "261d14b0-20d4-45ec-ab1d-fdd704a6630b" (UID: "261d14b0-20d4-45ec-ab1d-fdd704a6630b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.299164 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/261d14b0-20d4-45ec-ab1d-fdd704a6630b-kube-api-access-j8qr9" (OuterVolumeSpecName: "kube-api-access-j8qr9") pod "261d14b0-20d4-45ec-ab1d-fdd704a6630b" (UID: "261d14b0-20d4-45ec-ab1d-fdd704a6630b"). InnerVolumeSpecName "kube-api-access-j8qr9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.304367 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90bf0fe6-a714-419c-ab4a-46d0ece94652-kube-api-access-82wc7" (OuterVolumeSpecName: "kube-api-access-82wc7") pod "90bf0fe6-a714-419c-ab4a-46d0ece94652" (UID: "90bf0fe6-a714-419c-ab4a-46d0ece94652"). InnerVolumeSpecName "kube-api-access-82wc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.328164 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee07a3a8-51d8-498e-ace9-c4ea774065fd-kube-api-access-gw98f" (OuterVolumeSpecName: "kube-api-access-gw98f") pod "ee07a3a8-51d8-498e-ace9-c4ea774065fd" (UID: "ee07a3a8-51d8-498e-ace9-c4ea774065fd"). InnerVolumeSpecName "kube-api-access-gw98f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.328375 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31ce1b9b-164b-45ad-b989-27e535bbdb8b-kube-api-access-zqzcd" (OuterVolumeSpecName: "kube-api-access-zqzcd") pod "31ce1b9b-164b-45ad-b989-27e535bbdb8b" (UID: "31ce1b9b-164b-45ad-b989-27e535bbdb8b"). InnerVolumeSpecName "kube-api-access-zqzcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406236 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/261d14b0-20d4-45ec-ab1d-fdd704a6630b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406292 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31ce1b9b-164b-45ad-b989-27e535bbdb8b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406302 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406311 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90bf0fe6-a714-419c-ab4a-46d0ece94652-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406320 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee07a3a8-51d8-498e-ace9-c4ea774065fd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406330 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw98f\" (UniqueName: \"kubernetes.io/projected/ee07a3a8-51d8-498e-ace9-c4ea774065fd-kube-api-access-gw98f\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406342 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4z8bl\" (UniqueName: \"kubernetes.io/projected/c79b5e05-afb6-4a73-90d2-32beab5ba2d3-kube-api-access-4z8bl\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406352 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqzcd\" (UniqueName: 
\"kubernetes.io/projected/31ce1b9b-164b-45ad-b989-27e535bbdb8b-kube-api-access-zqzcd\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406364 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8qr9\" (UniqueName: \"kubernetes.io/projected/261d14b0-20d4-45ec-ab1d-fdd704a6630b-kube-api-access-j8qr9\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.406373 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82wc7\" (UniqueName: \"kubernetes.io/projected/90bf0fe6-a714-419c-ab4a-46d0ece94652-kube-api-access-82wc7\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.413315 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-5rc2l"] Jan 27 12:49:54 crc kubenswrapper[4900]: E0127 12:49:54.414230 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31ce1b9b-164b-45ad-b989-27e535bbdb8b" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414250 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="31ce1b9b-164b-45ad-b989-27e535bbdb8b" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: E0127 12:49:54.414271 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="261d14b0-20d4-45ec-ab1d-fdd704a6630b" containerName="mariadb-database-create" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414283 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="261d14b0-20d4-45ec-ab1d-fdd704a6630b" containerName="mariadb-database-create" Jan 27 12:49:54 crc kubenswrapper[4900]: E0127 12:49:54.414293 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bf0fe6-a714-419c-ab4a-46d0ece94652" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414300 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bf0fe6-a714-419c-ab4a-46d0ece94652" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: E0127 12:49:54.414351 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c79b5e05-afb6-4a73-90d2-32beab5ba2d3" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414357 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c79b5e05-afb6-4a73-90d2-32beab5ba2d3" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: E0127 12:49:54.414367 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee07a3a8-51d8-498e-ace9-c4ea774065fd" containerName="mariadb-database-create" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414373 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee07a3a8-51d8-498e-ace9-c4ea774065fd" containerName="mariadb-database-create" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414615 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="261d14b0-20d4-45ec-ab1d-fdd704a6630b" containerName="mariadb-database-create" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414632 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="c79b5e05-afb6-4a73-90d2-32beab5ba2d3" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414645 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee07a3a8-51d8-498e-ace9-c4ea774065fd" 
containerName="mariadb-database-create" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414666 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="31ce1b9b-164b-45ad-b989-27e535bbdb8b" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.414677 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="90bf0fe6-a714-419c-ab4a-46d0ece94652" containerName="mariadb-account-create-update" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.415642 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.423317 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.431248 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6vqzq" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.438157 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ade-account-create-update-drsd8" event={"ID":"c79b5e05-afb6-4a73-90d2-32beab5ba2d3","Type":"ContainerDied","Data":"ce06fab8e02c6ca61074b23dece3fe073c03f7fcae45206a66c656a18631a58d"} Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.438216 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce06fab8e02c6ca61074b23dece3fe073c03f7fcae45206a66c656a18631a58d" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.438298 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ade-account-create-update-drsd8" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.444602 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1e0a-account-create-update-jft2t" event={"ID":"31ce1b9b-164b-45ad-b989-27e535bbdb8b","Type":"ContainerDied","Data":"3ac693d924ccd40b3acaa05ac01354a4ce4705683cb185946615d60efff8f501"} Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.444658 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ac693d924ccd40b3acaa05ac01354a4ce4705683cb185946615d60efff8f501" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.444747 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-1e0a-account-create-update-jft2t" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.462727 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-29z9h" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.464449 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-5rc2l"] Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.464519 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-29z9h" event={"ID":"ee07a3a8-51d8-498e-ace9-c4ea774065fd","Type":"ContainerDied","Data":"3915a9c14a157e67dc48784dc4e583b36ea9d69ecba47475cce284281a64e9e4"} Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.464561 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3915a9c14a157e67dc48784dc4e583b36ea9d69ecba47475cce284281a64e9e4" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.474749 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-w6s6w" event={"ID":"261d14b0-20d4-45ec-ab1d-fdd704a6630b","Type":"ContainerDied","Data":"8dfb905e6821d55b1aa2ce9f15eefe6866418ae0eb0189e18d4eda2fafcb68e4"} Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.474803 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8dfb905e6821d55b1aa2ce9f15eefe6866418ae0eb0189e18d4eda2fafcb68e4" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.474889 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-w6s6w" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.493350 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-qtmpr" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.508043 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cncb\" (UniqueName: \"kubernetes.io/projected/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-kube-api-access-5cncb\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.508453 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-config-data\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.508631 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-db-sync-config-data\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.508736 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-combined-ca-bundle\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.590670 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-qtmpr" 
event={"ID":"90bf0fe6-a714-419c-ab4a-46d0ece94652","Type":"ContainerDied","Data":"d23d15a85a36e2ed4db86f1fc8f5f545ea8e4214d41567cce38a38f1ef9f5044"} Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.590709 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d23d15a85a36e2ed4db86f1fc8f5f545ea8e4214d41567cce38a38f1ef9f5044" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.612070 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-config-data\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.612172 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-db-sync-config-data\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.612239 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-combined-ca-bundle\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.612427 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cncb\" (UniqueName: \"kubernetes.io/projected/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-kube-api-access-5cncb\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.615950 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-db-sync-config-data\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.616329 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-config-data\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.617982 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-combined-ca-bundle\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.637729 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cncb\" (UniqueName: \"kubernetes.io/projected/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-kube-api-access-5cncb\") pod \"glance-db-sync-5rc2l\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:54 crc kubenswrapper[4900]: I0127 12:49:54.752728 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5rc2l" Jan 27 12:49:56 crc kubenswrapper[4900]: I0127 12:49:56.082690 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-z8pb8" Jan 27 12:49:56 crc kubenswrapper[4900]: I0127 12:49:56.232173 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-m4f5x"] Jan 27 12:49:56 crc kubenswrapper[4900]: I0127 12:49:56.232441 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" podUID="596b98f0-2b51-4a1e-ae50-a16dc5097101" containerName="dnsmasq-dns" containerID="cri-o://0e5f72475e6f7ad448b7dce1c86db1d882f65c3df7e5ae151d5a54cf5a61fb1d" gracePeriod=10 Jan 27 12:49:57 crc kubenswrapper[4900]: I0127 12:49:57.165893 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-qtmpr"] Jan 27 12:49:57 crc kubenswrapper[4900]: I0127 12:49:57.180251 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-qtmpr"] Jan 27 12:49:57 crc kubenswrapper[4900]: I0127 12:49:57.535593 4900 generic.go:334] "Generic (PLEG): container finished" podID="596b98f0-2b51-4a1e-ae50-a16dc5097101" containerID="0e5f72475e6f7ad448b7dce1c86db1d882f65c3df7e5ae151d5a54cf5a61fb1d" exitCode=0 Jan 27 12:49:57 crc kubenswrapper[4900]: I0127 12:49:57.535665 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" event={"ID":"596b98f0-2b51-4a1e-ae50-a16dc5097101","Type":"ContainerDied","Data":"0e5f72475e6f7ad448b7dce1c86db1d882f65c3df7e5ae151d5a54cf5a61fb1d"} Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.383332 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-k9z2g" podUID="c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75" containerName="ovn-controller" probeResult="failure" output=< Jan 27 12:49:58 crc kubenswrapper[4900]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 27 12:49:58 crc kubenswrapper[4900]: > Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.411017 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.412893 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-t4b4c" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.506594 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90bf0fe6-a714-419c-ab4a-46d0ece94652" path="/var/lib/kubelet/pods/90bf0fe6-a714-419c-ab4a-46d0ece94652/volumes" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.689758 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-k9z2g-config-2pr6v"] Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.691653 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.710261 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.719677 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k9z2g-config-2pr6v"] Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.824616 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.824672 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-scripts\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.824903 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-log-ovn\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.824978 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bprkm\" (UniqueName: \"kubernetes.io/projected/8998b37b-6c92-4227-82fe-d1ae56a26ee8-kube-api-access-bprkm\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.825313 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-additional-scripts\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.825381 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run-ovn\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.930696 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.931237 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-scripts\") pod 
\"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.931355 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-log-ovn\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.931405 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bprkm\" (UniqueName: \"kubernetes.io/projected/8998b37b-6c92-4227-82fe-d1ae56a26ee8-kube-api-access-bprkm\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.931594 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-additional-scripts\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.931655 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run-ovn\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.932106 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run-ovn\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.932170 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.939405 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-scripts\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.939545 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-log-ovn\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.942283 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-additional-scripts\") pod 
\"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:58 crc kubenswrapper[4900]: I0127 12:49:58.973831 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bprkm\" (UniqueName: \"kubernetes.io/projected/8998b37b-6c92-4227-82fe-d1ae56a26ee8-kube-api-access-bprkm\") pod \"ovn-controller-k9z2g-config-2pr6v\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") " pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.015112 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k9z2g-config-2pr6v" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.051166 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.138806 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-config\") pod \"596b98f0-2b51-4a1e-ae50-a16dc5097101\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.139353 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-dns-svc\") pod \"596b98f0-2b51-4a1e-ae50-a16dc5097101\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.139548 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-sb\") pod \"596b98f0-2b51-4a1e-ae50-a16dc5097101\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.139613 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4b9c\" (UniqueName: \"kubernetes.io/projected/596b98f0-2b51-4a1e-ae50-a16dc5097101-kube-api-access-m4b9c\") pod \"596b98f0-2b51-4a1e-ae50-a16dc5097101\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.139859 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-nb\") pod \"596b98f0-2b51-4a1e-ae50-a16dc5097101\" (UID: \"596b98f0-2b51-4a1e-ae50-a16dc5097101\") " Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.211507 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/596b98f0-2b51-4a1e-ae50-a16dc5097101-kube-api-access-m4b9c" (OuterVolumeSpecName: "kube-api-access-m4b9c") pod "596b98f0-2b51-4a1e-ae50-a16dc5097101" (UID: "596b98f0-2b51-4a1e-ae50-a16dc5097101"). InnerVolumeSpecName "kube-api-access-m4b9c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.245305 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4b9c\" (UniqueName: \"kubernetes.io/projected/596b98f0-2b51-4a1e-ae50-a16dc5097101-kube-api-access-m4b9c\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.430903 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-config" (OuterVolumeSpecName: "config") pod "596b98f0-2b51-4a1e-ae50-a16dc5097101" (UID: "596b98f0-2b51-4a1e-ae50-a16dc5097101"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.450407 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.461711 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "596b98f0-2b51-4a1e-ae50-a16dc5097101" (UID: "596b98f0-2b51-4a1e-ae50-a16dc5097101"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.470510 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "596b98f0-2b51-4a1e-ae50-a16dc5097101" (UID: "596b98f0-2b51-4a1e-ae50-a16dc5097101"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.471914 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.474153 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "596b98f0-2b51-4a1e-ae50-a16dc5097101" (UID: "596b98f0-2b51-4a1e-ae50-a16dc5097101"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.553008 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.553304 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.553415 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/596b98f0-2b51-4a1e-ae50-a16dc5097101-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.569172 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c5578d3a-7a63-42bb-bd05-499cc28ed723","Type":"ContainerStarted","Data":"6bf241e78d003c6ff41fcee4166995bf41f7718676328bcd69c54c2795b8cf27"} Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.583113 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerStarted","Data":"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"} Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.598788 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" event={"ID":"596b98f0-2b51-4a1e-ae50-a16dc5097101","Type":"ContainerDied","Data":"c983f102fc9a049979d33b1885451c1fce029e500f7589b6a082b110987b5357"} Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.598860 4900 scope.go:117] "RemoveContainer" containerID="0e5f72475e6f7ad448b7dce1c86db1d882f65c3df7e5ae151d5a54cf5a61fb1d" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.599075 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-m4f5x" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.685376 4900 scope.go:117] "RemoveContainer" containerID="0fb6d5d4722cfcdbdb4c63bedd7e5b419ade21989ab569b6265732e123b56530" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.704492 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=23.260620095 podStartE2EDuration="1m14.704464812s" podCreationTimestamp="2026-01-27 12:48:45 +0000 UTC" firstStartedPulling="2026-01-27 12:49:07.34794201 +0000 UTC m=+1374.584970220" lastFinishedPulling="2026-01-27 12:49:58.791786717 +0000 UTC m=+1426.028814937" observedRunningTime="2026-01-27 12:49:59.69033755 +0000 UTC m=+1426.927365760" watchObservedRunningTime="2026-01-27 12:49:59.704464812 +0000 UTC m=+1426.941493022" Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.726096 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-m4f5x"] Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.743151 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-m4f5x"] Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.760787 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg"] Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.809674 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-accf-account-create-update-s6gmw"] Jan 27 12:49:59 crc kubenswrapper[4900]: W0127 12:49:59.827446 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd31604a_235d_497a_a5fb_00c2928a0954.slice/crio-c1d60489c124e7e1a7468dd5218253d66ebfb399d393f7328a2fb879596923f7 WatchSource:0}: Error finding container c1d60489c124e7e1a7468dd5218253d66ebfb399d393f7328a2fb879596923f7: Status 404 returned error can't find the container with id c1d60489c124e7e1a7468dd5218253d66ebfb399d393f7328a2fb879596923f7 Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.842401 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-5rc2l"] Jan 27 12:49:59 crc kubenswrapper[4900]: I0127 12:49:59.895817 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k9z2g-config-2pr6v"] Jan 27 12:49:59 crc kubenswrapper[4900]: W0127 12:49:59.903721 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8998b37b_6c92_4227_82fe_d1ae56a26ee8.slice/crio-42030099bef283c1a196830f4d33048c77b7a4c2802d91a65f079b2b948d61f5 WatchSource:0}: Error finding container 42030099bef283c1a196830f4d33048c77b7a4c2802d91a65f079b2b948d61f5: Status 404 returned error can't find the container with id 42030099bef283c1a196830f4d33048c77b7a4c2802d91a65f079b2b948d61f5 Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.500278 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="596b98f0-2b51-4a1e-ae50-a16dc5097101" path="/var/lib/kubelet/pods/596b98f0-2b51-4a1e-ae50-a16dc5097101/volumes" Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.640557 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k9z2g-config-2pr6v" event={"ID":"8998b37b-6c92-4227-82fe-d1ae56a26ee8","Type":"ContainerStarted","Data":"fa61a06eeafaa4aebbc5a3252fd558c86a4604d5db84faa144eba4f9eb23e47b"} Jan 27 
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.640628 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k9z2g-config-2pr6v" event={"ID":"8998b37b-6c92-4227-82fe-d1ae56a26ee8","Type":"ContainerStarted","Data":"42030099bef283c1a196830f4d33048c77b7a4c2802d91a65f079b2b948d61f5"}
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.644051 4900 generic.go:334] "Generic (PLEG): container finished" podID="fd31604a-235d-497a-a5fb-00c2928a0954" containerID="f28eb2dc00c93b8d7274fe9fd73951a031d994477730a4a40fb722418b6342e5" exitCode=0
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.644133 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" event={"ID":"fd31604a-235d-497a-a5fb-00c2928a0954","Type":"ContainerDied","Data":"f28eb2dc00c93b8d7274fe9fd73951a031d994477730a4a40fb722418b6342e5"}
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.644166 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" event={"ID":"fd31604a-235d-497a-a5fb-00c2928a0954","Type":"ContainerStarted","Data":"c1d60489c124e7e1a7468dd5218253d66ebfb399d393f7328a2fb879596923f7"}
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.646135 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5rc2l" event={"ID":"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4","Type":"ContainerStarted","Data":"1a690051342ea055ee2054665a6f8d7ddfc6985242f5849445c91becdcd971ef"}
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.649395 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dzlzf" event={"ID":"33ba5ed7-2f73-4fda-94cb-568e6a8c9843","Type":"ContainerStarted","Data":"e4e04db5e11aa038069db42d6647c20aae47621f2214403a482c17492ec28460"}
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.652789 4900 generic.go:334] "Generic (PLEG): container finished" podID="ad92fe8d-3cd2-4bac-8d4e-8e0590824e17" containerID="750239e6f863bd37bf3a65af23ef7bbad093e351d2e4d571498c10d1dd39cf26" exitCode=0
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.653706 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" event={"ID":"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17","Type":"ContainerDied","Data":"750239e6f863bd37bf3a65af23ef7bbad093e351d2e4d571498c10d1dd39cf26"}
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.653739 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" event={"ID":"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17","Type":"ContainerStarted","Data":"e62e38c31190c151d2ada67c1d1dd75eaf85bf049740770f504818d6d2d14768"}
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.663392 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-k9z2g-config-2pr6v" podStartSLOduration=2.6633750259999998 podStartE2EDuration="2.663375026s" podCreationTimestamp="2026-01-27 12:49:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:00.661331876 +0000 UTC m=+1427.898360086" watchObservedRunningTime="2026-01-27 12:50:00.663375026 +0000 UTC m=+1427.900403236"
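Aside on the "Observed pod startup duration" entries: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is the same interval minus time spent pulling images (lastFinishedPulling minus firstStartedPulling); when no image was pulled the two figures coincide, as in the ovn-controller entry above. A quick Go check of this arithmetic against the prometheus-metric-storage-0 entry logged earlier (timestamps copied from the log, monotonic-clock suffixes dropped, so the result agrees only to within a few nanoseconds):

    package main

    import (
        "fmt"
        "time"
    )

    func mustParse(s string) time.Time {
        t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2026-01-27 12:48:45 +0000 UTC")
        firstPull := mustParse("2026-01-27 12:49:07.34794201 +0000 UTC")
        lastPull := mustParse("2026-01-27 12:49:58.791786717 +0000 UTC")
        running := mustParse("2026-01-27 12:49:59.704464812 +0000 UTC")

        e2e := running.Sub(created)             // ~1m14.704s, matches podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull)    // ~23.26s, matches podStartSLOduration
        fmt.Println("podStartE2EDuration:", e2e)
        fmt.Println("podStartSLOduration:", slo)
    }
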
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.743550 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-dzlzf" podStartSLOduration=10.53950374 podStartE2EDuration="20.743519642s" podCreationTimestamp="2026-01-27 12:49:40 +0000 UTC" firstStartedPulling="2026-01-27 12:49:48.592651677 +0000 UTC m=+1415.829679887" lastFinishedPulling="2026-01-27 12:49:58.796667579 +0000 UTC m=+1426.033695789" observedRunningTime="2026-01-27 12:50:00.733870201 +0000 UTC m=+1427.970898411" watchObservedRunningTime="2026-01-27 12:50:00.743519642 +0000 UTC m=+1427.980547862"
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.828493 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused"
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.867312 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused"
Jan 27 12:50:00 crc kubenswrapper[4900]: I0127 12:50:00.897763 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused"
Jan 27 12:50:01 crc kubenswrapper[4900]: I0127 12:50:01.299799 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c4594c71-599f-4576-bf95-303da1436ca4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.132:5671: connect: connection refused"
Jan 27 12:50:01 crc kubenswrapper[4900]: I0127 12:50:01.667718 4900 generic.go:334] "Generic (PLEG): container finished" podID="8998b37b-6c92-4227-82fe-d1ae56a26ee8" containerID="fa61a06eeafaa4aebbc5a3252fd558c86a4604d5db84faa144eba4f9eb23e47b" exitCode=0
Jan 27 12:50:01 crc kubenswrapper[4900]: I0127 12:50:01.668247 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k9z2g-config-2pr6v" event={"ID":"8998b37b-6c92-4227-82fe-d1ae56a26ee8","Type":"ContainerDied","Data":"fa61a06eeafaa4aebbc5a3252fd558c86a4604d5db84faa144eba4f9eb23e47b"}
Jan 27 12:50:01 crc kubenswrapper[4900]: I0127 12:50:01.672240 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c5578d3a-7a63-42bb-bd05-499cc28ed723","Type":"ContainerStarted","Data":"a2ca12e209a287db584f6de4e62352fd890d18c08c38cab14d046350851ba783"}
Jan 27 12:50:01 crc kubenswrapper[4900]: I0127 12:50:01.672301 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c5578d3a-7a63-42bb-bd05-499cc28ed723","Type":"ContainerStarted","Data":"5c3a34be2f981a96a1cdfd1dff263f8c26b12e294a95c099b1a7eb04068b29e4"}
Jan 27 12:50:01 crc kubenswrapper[4900]: I0127 12:50:01.731946 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=7.200588845 podStartE2EDuration="8.731897515s" podCreationTimestamp="2026-01-27 12:49:53 +0000 UTC" firstStartedPulling="2026-01-27 12:49:59.485271992 +0000 UTC m=+1426.722300202" lastFinishedPulling="2026-01-27 12:50:01.016580662 +0000 UTC m=+1428.253608872" observedRunningTime="2026-01-27 12:50:01.71663002 +0000 UTC m=+1428.953658230" watchObservedRunningTime="2026-01-27 12:50:01.731897515 +0000 UTC m=+1428.968925725"
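Aside on the rabbitmq "Probe failed" entries above: these are TCP readiness probes, where the kubelet dials the pod IP and port and treats a refused connection as failure. A minimal stand-alone equivalent of that check (address copied from the log; on a machine without that listener this prints the same kind of refusal or a timeout):

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    // tcpProbe succeeds if a TCP connection to addr can be opened.
    func tcpProbe(addr string, timeout time.Duration) error {
        conn, err := net.DialTimeout("tcp", addr, timeout)
        if err != nil {
            return err
        }
        return conn.Close()
    }

    func main() {
        if err := tcpProbe("10.217.0.129:5671", time.Second); err != nil {
            // e.g. "dial tcp 10.217.0.129:5671: connect: connection refused"
            fmt.Println("probe failed:", err)
        } else {
            fmt.Println("probe succeeded")
        }
    }
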
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.193695 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-m57ds"]
Jan 27 12:50:02 crc kubenswrapper[4900]: E0127 12:50:02.194352 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="596b98f0-2b51-4a1e-ae50-a16dc5097101" containerName="dnsmasq-dns"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.194369 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="596b98f0-2b51-4a1e-ae50-a16dc5097101" containerName="dnsmasq-dns"
Jan 27 12:50:02 crc kubenswrapper[4900]: E0127 12:50:02.194399 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="596b98f0-2b51-4a1e-ae50-a16dc5097101" containerName="init"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.194406 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="596b98f0-2b51-4a1e-ae50-a16dc5097101" containerName="init"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.194640 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="596b98f0-2b51-4a1e-ae50-a16dc5097101" containerName="dnsmasq-dns"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.195567 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.200430 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.203157 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-m57ds"]
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.300342 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.335512 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9tmw\" (UniqueName: \"kubernetes.io/projected/65635e6c-be43-42a5-a370-884329911a60-kube-api-access-w9tmw\") pod \"root-account-create-update-m57ds\" (UID: \"65635e6c-be43-42a5-a370-884329911a60\") " pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.335820 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65635e6c-be43-42a5-a370-884329911a60-operator-scripts\") pod \"root-account-create-update-m57ds\" (UID: \"65635e6c-be43-42a5-a370-884329911a60\") " pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.367992 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.370201 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.375313 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.437761 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-operator-scripts\") pod \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\" (UID: \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\") "
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.437879 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qgjl8\" (UniqueName: \"kubernetes.io/projected/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-kube-api-access-qgjl8\") pod \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\" (UID: \"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17\") "
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.438710 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9tmw\" (UniqueName: \"kubernetes.io/projected/65635e6c-be43-42a5-a370-884329911a60-kube-api-access-w9tmw\") pod \"root-account-create-update-m57ds\" (UID: \"65635e6c-be43-42a5-a370-884329911a60\") " pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.438919 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65635e6c-be43-42a5-a370-884329911a60-operator-scripts\") pod \"root-account-create-update-m57ds\" (UID: \"65635e6c-be43-42a5-a370-884329911a60\") " pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.439068 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ad92fe8d-3cd2-4bac-8d4e-8e0590824e17" (UID: "ad92fe8d-3cd2-4bac-8d4e-8e0590824e17"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.439914 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65635e6c-be43-42a5-a370-884329911a60-operator-scripts\") pod \"root-account-create-update-m57ds\" (UID: \"65635e6c-be43-42a5-a370-884329911a60\") " pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.444839 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-kube-api-access-qgjl8" (OuterVolumeSpecName: "kube-api-access-qgjl8") pod "ad92fe8d-3cd2-4bac-8d4e-8e0590824e17" (UID: "ad92fe8d-3cd2-4bac-8d4e-8e0590824e17"). InnerVolumeSpecName "kube-api-access-qgjl8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.456837 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.468353 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9tmw\" (UniqueName: \"kubernetes.io/projected/65635e6c-be43-42a5-a370-884329911a60-kube-api-access-w9tmw\") pod \"root-account-create-update-m57ds\" (UID: \"65635e6c-be43-42a5-a370-884329911a60\") " pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.540100 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd31604a-235d-497a-a5fb-00c2928a0954-operator-scripts\") pod \"fd31604a-235d-497a-a5fb-00c2928a0954\" (UID: \"fd31604a-235d-497a-a5fb-00c2928a0954\") "
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.540340 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4qcn\" (UniqueName: \"kubernetes.io/projected/fd31604a-235d-497a-a5fb-00c2928a0954-kube-api-access-b4qcn\") pod \"fd31604a-235d-497a-a5fb-00c2928a0954\" (UID: \"fd31604a-235d-497a-a5fb-00c2928a0954\") "
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.540920 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd31604a-235d-497a-a5fb-00c2928a0954-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fd31604a-235d-497a-a5fb-00c2928a0954" (UID: "fd31604a-235d-497a-a5fb-00c2928a0954"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.542339 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.542369 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qgjl8\" (UniqueName: \"kubernetes.io/projected/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17-kube-api-access-qgjl8\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.542386 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd31604a-235d-497a-a5fb-00c2928a0954-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.544241 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd31604a-235d-497a-a5fb-00c2928a0954-kube-api-access-b4qcn" (OuterVolumeSpecName: "kube-api-access-b4qcn") pod "fd31604a-235d-497a-a5fb-00c2928a0954" (UID: "fd31604a-235d-497a-a5fb-00c2928a0954"). InnerVolumeSpecName "kube-api-access-b4qcn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.619365 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.644251 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4qcn\" (UniqueName: \"kubernetes.io/projected/fd31604a-235d-497a-a5fb-00c2928a0954-kube-api-access-b4qcn\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.701203 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg" event={"ID":"ad92fe8d-3cd2-4bac-8d4e-8e0590824e17","Type":"ContainerDied","Data":"e62e38c31190c151d2ada67c1d1dd75eaf85bf049740770f504818d6d2d14768"}
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.702325 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e62e38c31190c151d2ada67c1d1dd75eaf85bf049740770f504818d6d2d14768"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.701434 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.704359 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.704480 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-accf-account-create-update-s6gmw" event={"ID":"fd31604a-235d-497a-a5fb-00c2928a0954","Type":"ContainerDied","Data":"c1d60489c124e7e1a7468dd5218253d66ebfb399d393f7328a2fb879596923f7"}
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.704531 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1d60489c124e7e1a7468dd5218253d66ebfb399d393f7328a2fb879596923f7"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.705628 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Jan 27 12:50:02 crc kubenswrapper[4900]: I0127 12:50:02.706002 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.527736 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k9z2g-config-2pr6v"
Jan 27 12:50:03 crc kubenswrapper[4900]: W0127 12:50:03.541944 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65635e6c_be43_42a5_a370_884329911a60.slice/crio-ffeceff0afe69d795af9f3cda7c2035ab5bc0efdd7eb8ee2e10cd8e65bd6a8e9 WatchSource:0}: Error finding container ffeceff0afe69d795af9f3cda7c2035ab5bc0efdd7eb8ee2e10cd8e65bd6a8e9: Status 404 returned error can't find the container with id ffeceff0afe69d795af9f3cda7c2035ab5bc0efdd7eb8ee2e10cd8e65bd6a8e9
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.546654 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-k9z2g"
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.588049 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-m57ds"]
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.646965 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bprkm\" (UniqueName: \"kubernetes.io/projected/8998b37b-6c92-4227-82fe-d1ae56a26ee8-kube-api-access-bprkm\") pod \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") "
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.647025 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run\") pod \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") "
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.647103 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-additional-scripts\") pod \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") "
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.647181 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run-ovn\") pod \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") "
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.647217 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-scripts\") pod \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") "
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.647392 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-log-ovn\") pod \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\" (UID: \"8998b37b-6c92-4227-82fe-d1ae56a26ee8\") "
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.649230 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "8998b37b-6c92-4227-82fe-d1ae56a26ee8" (UID: "8998b37b-6c92-4227-82fe-d1ae56a26ee8"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.651485 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "8998b37b-6c92-4227-82fe-d1ae56a26ee8" (UID: "8998b37b-6c92-4227-82fe-d1ae56a26ee8"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.651521 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run" (OuterVolumeSpecName: "var-run") pod "8998b37b-6c92-4227-82fe-d1ae56a26ee8" (UID: "8998b37b-6c92-4227-82fe-d1ae56a26ee8"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.651543 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "8998b37b-6c92-4227-82fe-d1ae56a26ee8" (UID: "8998b37b-6c92-4227-82fe-d1ae56a26ee8"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.654787 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-scripts" (OuterVolumeSpecName: "scripts") pod "8998b37b-6c92-4227-82fe-d1ae56a26ee8" (UID: "8998b37b-6c92-4227-82fe-d1ae56a26ee8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.671813 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8998b37b-6c92-4227-82fe-d1ae56a26ee8-kube-api-access-bprkm" (OuterVolumeSpecName: "kube-api-access-bprkm") pod "8998b37b-6c92-4227-82fe-d1ae56a26ee8" (UID: "8998b37b-6c92-4227-82fe-d1ae56a26ee8"). InnerVolumeSpecName "kube-api-access-bprkm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.749372 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k9z2g-config-2pr6v"
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.749383 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k9z2g-config-2pr6v" event={"ID":"8998b37b-6c92-4227-82fe-d1ae56a26ee8","Type":"ContainerDied","Data":"42030099bef283c1a196830f4d33048c77b7a4c2802d91a65f079b2b948d61f5"}
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.749480 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42030099bef283c1a196830f4d33048c77b7a4c2802d91a65f079b2b948d61f5"
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.752604 4900 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.752652 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bprkm\" (UniqueName: \"kubernetes.io/projected/8998b37b-6c92-4227-82fe-d1ae56a26ee8-kube-api-access-bprkm\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.752670 4900 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.752679 4900 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.752691 4900 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8998b37b-6c92-4227-82fe-d1ae56a26ee8-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.752700 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8998b37b-6c92-4227-82fe-d1ae56a26ee8-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:03 crc kubenswrapper[4900]: I0127 12:50:03.759434 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-m57ds" event={"ID":"65635e6c-be43-42a5-a370-884329911a60","Type":"ContainerStarted","Data":"ffeceff0afe69d795af9f3cda7c2035ab5bc0efdd7eb8ee2e10cd8e65bd6a8e9"}
Jan 27 12:50:04 crc kubenswrapper[4900]: I0127 12:50:04.650965 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-k9z2g-config-2pr6v"]
Jan 27 12:50:04 crc kubenswrapper[4900]: I0127 12:50:04.659518 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-k9z2g-config-2pr6v"]
Jan 27 12:50:04 crc kubenswrapper[4900]: I0127 12:50:04.789890 4900 generic.go:334] "Generic (PLEG): container finished" podID="65635e6c-be43-42a5-a370-884329911a60" containerID="6f6f62fac538794159a5813ad7b076ad5cb7c443c9827eea47cb4e68b7f77b92" exitCode=0
Jan 27 12:50:04 crc kubenswrapper[4900]: I0127 12:50:04.789956 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-m57ds" event={"ID":"65635e6c-be43-42a5-a370-884329911a60","Type":"ContainerDied","Data":"6f6f62fac538794159a5813ad7b076ad5cb7c443c9827eea47cb4e68b7f77b92"}
pods=["openstack/prometheus-metric-storage-0"] Jan 27 12:50:05 crc kubenswrapper[4900]: I0127 12:50:05.762657 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="prometheus" containerID="cri-o://274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a" gracePeriod=600 Jan 27 12:50:05 crc kubenswrapper[4900]: I0127 12:50:05.762742 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="config-reloader" containerID="cri-o://1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5" gracePeriod=600 Jan 27 12:50:05 crc kubenswrapper[4900]: I0127 12:50:05.762773 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="thanos-sidecar" containerID="cri-o://615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a" gracePeriod=600 Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.299243 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-m57ds" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.346322 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Jan 27 12:50:06 crc kubenswrapper[4900]: E0127 12:50:06.347001 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd31604a-235d-497a-a5fb-00c2928a0954" containerName="mariadb-account-create-update" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.347029 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd31604a-235d-497a-a5fb-00c2928a0954" containerName="mariadb-account-create-update" Jan 27 12:50:06 crc kubenswrapper[4900]: E0127 12:50:06.347085 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad92fe8d-3cd2-4bac-8d4e-8e0590824e17" containerName="mariadb-database-create" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.347096 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad92fe8d-3cd2-4bac-8d4e-8e0590824e17" containerName="mariadb-database-create" Jan 27 12:50:06 crc kubenswrapper[4900]: E0127 12:50:06.347118 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8998b37b-6c92-4227-82fe-d1ae56a26ee8" containerName="ovn-config" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.347129 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8998b37b-6c92-4227-82fe-d1ae56a26ee8" containerName="ovn-config" Jan 27 12:50:06 crc kubenswrapper[4900]: E0127 12:50:06.347144 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65635e6c-be43-42a5-a370-884329911a60" containerName="mariadb-account-create-update" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.347154 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="65635e6c-be43-42a5-a370-884329911a60" containerName="mariadb-account-create-update" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.347418 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="65635e6c-be43-42a5-a370-884329911a60" containerName="mariadb-account-create-update" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.347447 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8998b37b-6c92-4227-82fe-d1ae56a26ee8" containerName="ovn-config" Jan 27 12:50:06 
crc kubenswrapper[4900]: I0127 12:50:06.347468 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd31604a-235d-497a-a5fb-00c2928a0954" containerName="mariadb-account-create-update" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.347488 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad92fe8d-3cd2-4bac-8d4e-8e0590824e17" containerName="mariadb-database-create" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.348596 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.351773 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.396732 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.419111 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65635e6c-be43-42a5-a370-884329911a60-operator-scripts\") pod \"65635e6c-be43-42a5-a370-884329911a60\" (UID: \"65635e6c-be43-42a5-a370-884329911a60\") " Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.419251 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9tmw\" (UniqueName: \"kubernetes.io/projected/65635e6c-be43-42a5-a370-884329911a60-kube-api-access-w9tmw\") pod \"65635e6c-be43-42a5-a370-884329911a60\" (UID: \"65635e6c-be43-42a5-a370-884329911a60\") " Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.421081 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65635e6c-be43-42a5-a370-884329911a60-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "65635e6c-be43-42a5-a370-884329911a60" (UID: "65635e6c-be43-42a5-a370-884329911a60"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.447519 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65635e6c-be43-42a5-a370-884329911a60-kube-api-access-w9tmw" (OuterVolumeSpecName: "kube-api-access-w9tmw") pod "65635e6c-be43-42a5-a370-884329911a60" (UID: "65635e6c-be43-42a5-a370-884329911a60"). InnerVolumeSpecName "kube-api-access-w9tmw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.516848 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8998b37b-6c92-4227-82fe-d1ae56a26ee8" path="/var/lib/kubelet/pods/8998b37b-6c92-4227-82fe-d1ae56a26ee8/volumes" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.521803 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.521945 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hl95\" (UniqueName: \"kubernetes.io/projected/2b8ccf3d-f0c3-4607-a421-da50e552cecf-kube-api-access-7hl95\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.522107 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-config-data\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.522201 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9tmw\" (UniqueName: \"kubernetes.io/projected/65635e6c-be43-42a5-a370-884329911a60-kube-api-access-w9tmw\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.522221 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65635e6c-be43-42a5-a370-884329911a60-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.629459 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.629612 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hl95\" (UniqueName: \"kubernetes.io/projected/2b8ccf3d-f0c3-4607-a421-da50e552cecf-kube-api-access-7hl95\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.629737 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-config-data\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.634382 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-config-data\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.634775 
4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.648706 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hl95\" (UniqueName: \"kubernetes.io/projected/2b8ccf3d-f0c3-4607-a421-da50e552cecf-kube-api-access-7hl95\") pod \"mysqld-exporter-0\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.668179 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.822734 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.848218 4900 generic.go:334] "Generic (PLEG): container finished" podID="2537092f-2211-4329-afe3-1e15bdd14256" containerID="615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a" exitCode=0 Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.848257 4900 generic.go:334] "Generic (PLEG): container finished" podID="2537092f-2211-4329-afe3-1e15bdd14256" containerID="1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5" exitCode=0 Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.848268 4900 generic.go:334] "Generic (PLEG): container finished" podID="2537092f-2211-4329-afe3-1e15bdd14256" containerID="274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a" exitCode=0 Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.848322 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerDied","Data":"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"} Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.848430 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerDied","Data":"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"} Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.848449 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerDied","Data":"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"} Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.848460 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"2537092f-2211-4329-afe3-1e15bdd14256","Type":"ContainerDied","Data":"83b8c607df95c25e894c821482a1db35c4151bbefef56df832703ffedc1d7ee4"} Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.848478 4900 scope.go:117] "RemoveContainer" containerID="615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a" Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.857705 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-m57ds" event={"ID":"65635e6c-be43-42a5-a370-884329911a60","Type":"ContainerDied","Data":"ffeceff0afe69d795af9f3cda7c2035ab5bc0efdd7eb8ee2e10cd8e65bd6a8e9"} Jan 27 
12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.857750 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffeceff0afe69d795af9f3cda7c2035ab5bc0efdd7eb8ee2e10cd8e65bd6a8e9"
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.857783 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-m57ds"
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.935916 4900 scope.go:117] "RemoveContainer" containerID="1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.945373 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-web-config\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.945450 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-tls-assets\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.945486 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-config\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.945507 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt8pz\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-kube-api-access-gt8pz\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.945589 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-2\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.952871 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.954702 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.959307 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-0\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.959423 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-thanos-prometheus-http-client-file\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.959535 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2537092f-2211-4329-afe3-1e15bdd14256-config-out\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.959590 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-1\") pod \"2537092f-2211-4329-afe3-1e15bdd14256\" (UID: \"2537092f-2211-4329-afe3-1e15bdd14256\") "
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.960332 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.961006 4900 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.961029 4900 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.962818 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-kube-api-access-gt8pz" (OuterVolumeSpecName: "kube-api-access-gt8pz") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "kube-api-access-gt8pz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.963395 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.964812 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.976232 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2537092f-2211-4329-afe3-1e15bdd14256-config-out" (OuterVolumeSpecName: "config-out") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.984962 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-config" (OuterVolumeSpecName: "config") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.987942 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:50:06 crc kubenswrapper[4900]: I0127 12:50:06.991007 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "pvc-aec966f1-268a-4a70-9845-26adc756570e". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:06.996279 4900 scope.go:117] "RemoveContainer" containerID="274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.043943 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-web-config" (OuterVolumeSpecName: "web-config") pod "2537092f-2211-4329-afe3-1e15bdd14256" (UID: "2537092f-2211-4329-afe3-1e15bdd14256"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.063547 4900 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-tls-assets\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.063590 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.063603 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt8pz\" (UniqueName: \"kubernetes.io/projected/2537092f-2211-4329-afe3-1e15bdd14256-kube-api-access-gt8pz\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.063688 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") on node \"crc\" "
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.063707 4900 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.063721 4900 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/2537092f-2211-4329-afe3-1e15bdd14256-config-out\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.063736 4900 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/2537092f-2211-4329-afe3-1e15bdd14256-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.063747 4900 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/2537092f-2211-4329-afe3-1e15bdd14256-web-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.112540 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.112777 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-aec966f1-268a-4a70-9845-26adc756570e" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e") on node "crc"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.150428 4900 scope.go:117] "RemoveContainer" containerID="f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.166606 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.197069 4900 scope.go:117] "RemoveContainer" containerID="615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"
Jan 27 12:50:07 crc kubenswrapper[4900]: E0127 12:50:07.198159 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a\": container with ID starting with 615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a not found: ID does not exist" containerID="615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.198211 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"} err="failed to get container status \"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a\": rpc error: code = NotFound desc = could not find container \"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a\": container with ID starting with 615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.198242 4900 scope.go:117] "RemoveContainer" containerID="1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"
Jan 27 12:50:07 crc kubenswrapper[4900]: E0127 12:50:07.204098 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5\": container with ID starting with 1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5 not found: ID does not exist" containerID="1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.204248 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"} err="failed to get container status \"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5\": rpc error: code = NotFound desc = could not find container \"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5\": container with ID starting with 1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5 not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.204299 4900 scope.go:117] "RemoveContainer" containerID="274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"
Jan 27 12:50:07 crc kubenswrapper[4900]: E0127 12:50:07.212533 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a\": container with ID starting with 274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a not found: ID does not exist" containerID="274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.212638 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"} err="failed to get container status \"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a\": rpc error: code = NotFound desc = could not find container \"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a\": container with ID starting with 274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.212696 4900 scope.go:117] "RemoveContainer" containerID="f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"
Jan 27 12:50:07 crc kubenswrapper[4900]: E0127 12:50:07.213699 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a\": container with ID starting with f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a not found: ID does not exist" containerID="f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.213758 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"} err="failed to get container status \"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a\": rpc error: code = NotFound desc = could not find container \"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a\": container with ID starting with f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.213803 4900 scope.go:117] "RemoveContainer" containerID="615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.214460 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"} err="failed to get container status \"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a\": rpc error: code = NotFound desc = could not find container \"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a\": container with ID starting with 615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.214493 4900 scope.go:117] "RemoveContainer" containerID="1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.220424 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"} err="failed to get container status \"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5\": rpc error: code = NotFound desc = could not find container \"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5\": container with ID starting with 1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5 not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.220499 4900 scope.go:117] "RemoveContainer" containerID="274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.227854 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"} err="failed to get container status \"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a\": rpc error: code = NotFound desc = could not find container \"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a\": container with ID starting with 274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.227907 4900 scope.go:117] "RemoveContainer" containerID="f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.231492 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"} err="failed to get container status \"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a\": rpc error: code = NotFound desc = could not find container \"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a\": container with ID starting with f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.231530 4900 scope.go:117] "RemoveContainer" containerID="615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.234126 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a"} err="failed to get container status \"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a\": rpc error: code = NotFound desc = could not find container \"615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a\": container with ID starting with 615c3a6bd13a5195ffeb8f199273b1212052c54418e5b5d938887662ae7ab61a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.234169 4900 scope.go:117] "RemoveContainer" containerID="1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.235250 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5"} err="failed to get container status \"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5\": rpc error: code = NotFound desc = could not find container \"1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5\": container with ID starting with 1c9296af185804a4bb7cfd8e5dcd087a2f824b8b0992f511674699d9587babf5 not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.235275 4900 scope.go:117] "RemoveContainer" containerID="274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.235857 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a"} err="failed to get container status \"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a\": rpc error: code = NotFound desc = could not find container \"274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a\": container with ID starting with 274b78c87565eb77ff0ddde0ae0f32cfc985b1b55212e4bdcf76598359edd66a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.235879 4900 scope.go:117] "RemoveContainer" containerID="f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.236191 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a"} err="failed to get container status \"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a\": rpc error: code = NotFound desc = could not find container \"f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a\": container with ID starting with f7d260d8e203b1da98c98899c97dbca13355dd14bd1ed1d4b55f7b71f444f44a not found: ID does not exist"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.350756 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.891220 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.893910 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"2b8ccf3d-f0c3-4607-a421-da50e552cecf","Type":"ContainerStarted","Data":"bdd13345afd793409172d7a1e58729fb2bdb25c28332a684eac78e63aa3113a1"}
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.957622 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 12:50:07 crc kubenswrapper[4900]: I0127 12:50:07.982955 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.010720 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 12:50:08 crc kubenswrapper[4900]: E0127 12:50:08.011349 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="init-config-reloader"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.011382 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="init-config-reloader"
Jan 27 12:50:08 crc kubenswrapper[4900]: E0127 12:50:08.011413 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="prometheus"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.011423 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="prometheus"
Jan 27 12:50:08 crc kubenswrapper[4900]: E0127 12:50:08.011443 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="thanos-sidecar"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.011451 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="thanos-sidecar"
Jan 27 12:50:08 crc kubenswrapper[4900]: E0127 12:50:08.011480 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="config-reloader"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.011487 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="config-reloader"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.011761 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="prometheus"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.011788 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="config-reloader"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.011825 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2537092f-2211-4329-afe3-1e15bdd14256" containerName="thanos-sidecar"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.014692 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.021787 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.022028 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.022290 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.022421 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.022528 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.022640 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.022807 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-ljsg8"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.023021 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.023338 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.037226 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.093743 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmzww\" (UniqueName: \"kubernetes.io/projected/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-kube-api-access-hmzww\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.093807 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.093898 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.093948 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.093996 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.094095 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.094157 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.094236 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-config\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.094305 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.094369 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.094399 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.094446 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.094471 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198195 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198310 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198361 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198410 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198441 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198491 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmzww\" (UniqueName: \"kubernetes.io/projected/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-kube-api-access-hmzww\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198522 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198557 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198608 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198651 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198729 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198795 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.198835 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-config\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.201824 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.205757 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.206321 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.207309 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.207454 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-config\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.211559 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
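The long run of "RemoveContainer" / "DeleteContainer returned error" pairs earlier in this window is the kubelet asking CRI-O to delete containers that are already gone; the NotFound response is logged but treated as a harmless terminal state, which makes the cleanup effectively idempotent. A small stdlib-only Go sketch of that pattern (the sentinel error and the map standing in for the runtime are assumptions of this sketch, not kubelet internals):

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the gRPC NotFound code CRI-O returns when a
// container ID no longer exists.
var errNotFound = errors.New("could not find container: ID does not exist")

// removeContainer treats "already deleted" as success; this is why the
// repeated NotFound lines above are informational rather than fatal.
func removeContainer(runtime map[string]struct{}, id string) error {
	if _, ok := runtime[id]; !ok {
		fmt.Printf("DeleteContainer %s: %v (desired state already reached)\n", id, errNotFound)
		return nil
	}
	delete(runtime, id)
	return nil
}

func main() {
	runtime := map[string]struct{}{"615c3a6bd13a5195": {}}
	_ = removeContainer(runtime, "615c3a6bd13a5195") // real deletion
	_ = removeContainer(runtime, "615c3a6bd13a5195") // NotFound path, as in the log
}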
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.211645 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9c38213e9dd6f58dfaaef0d7677bd37505bee9fcde8c91084cb29405ba765d6a/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.211783 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.215688 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.220985 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.221802 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.221787 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.226772 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmzww\" (UniqueName: \"kubernetes.io/projected/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-kube-api-access-hmzww\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.229333 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.285612 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-aec966f1-268a-4a70-9845-26adc756570e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aec966f1-268a-4a70-9845-26adc756570e\") pod \"prometheus-metric-storage-0\" (UID: \"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3\") " pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.388743 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 27 12:50:08 crc kubenswrapper[4900]: I0127 12:50:08.502438 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2537092f-2211-4329-afe3-1e15bdd14256" path="/var/lib/kubelet/pods/2537092f-2211-4329-afe3-1e15bdd14256/volumes"
Jan 27 12:50:09 crc kubenswrapper[4900]: I0127 12:50:09.127179 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0"
Jan 27 12:50:09 crc kubenswrapper[4900]: E0127 12:50:09.127623 4900 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 27 12:50:09 crc kubenswrapper[4900]: E0127 12:50:09.127640 4900 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 27 12:50:09 crc kubenswrapper[4900]: E0127 12:50:09.127691 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift podName:0c2f90a4-baa0-4eeb-a797-3664c306818b nodeName:}" failed. No retries permitted until 2026-01-27 12:50:41.127676602 +0000 UTC m=+1468.364704812 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift") pod "swift-storage-0" (UID: "0c2f90a4-baa0-4eeb-a797-3664c306818b") : configmap "swift-ring-files" not found
Jan 27 12:50:09 crc kubenswrapper[4900]: W0127 12:50:09.696661 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded32ddcf_11ab_4c82_bbdb_5be752fb6ae3.slice/crio-5db1d435bd65bb3dbbb52e654c93ec76c37dcc3ab002d378639e6c02b510315b WatchSource:0}: Error finding container 5db1d435bd65bb3dbbb52e654c93ec76c37dcc3ab002d378639e6c02b510315b: Status 404 returned error can't find the container with id 5db1d435bd65bb3dbbb52e654c93ec76c37dcc3ab002d378639e6c02b510315b
Jan 27 12:50:09 crc kubenswrapper[4900]: I0127 12:50:09.701975 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 27 12:50:09 crc kubenswrapper[4900]: I0127 12:50:09.928483 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3","Type":"ContainerStarted","Data":"5db1d435bd65bb3dbbb52e654c93ec76c37dcc3ab002d378639e6c02b510315b"}
Jan 27 12:50:09 crc kubenswrapper[4900]: I0127 12:50:09.930831 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"2b8ccf3d-f0c3-4607-a421-da50e552cecf","Type":"ContainerStarted","Data":"3e519ecf3229697af080a0ed0c48694f3aba5f1d04da13b3b61fe36084aa42e9"}
Jan 27 12:50:09 crc kubenswrapper[4900]: I0127 12:50:09.933737 4900 generic.go:334] "Generic (PLEG): container finished" podID="33ba5ed7-2f73-4fda-94cb-568e6a8c9843" containerID="e4e04db5e11aa038069db42d6647c20aae47621f2214403a482c17492ec28460" exitCode=0
Jan 27 12:50:09 crc kubenswrapper[4900]: I0127 12:50:09.933793 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dzlzf" event={"ID":"33ba5ed7-2f73-4fda-94cb-568e6a8c9843","Type":"ContainerDied","Data":"e4e04db5e11aa038069db42d6647c20aae47621f2214403a482c17492ec28460"}
Jan 27 12:50:09 crc kubenswrapper[4900]: I0127 12:50:09.962472 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.083618905 podStartE2EDuration="3.962448856s" podCreationTimestamp="2026-01-27 12:50:06 +0000 UTC" firstStartedPulling="2026-01-27 12:50:07.361172755 +0000 UTC m=+1434.598200965" lastFinishedPulling="2026-01-27 12:50:09.240002706 +0000 UTC m=+1436.477030916" observedRunningTime="2026-01-27 12:50:09.949386675 +0000 UTC m=+1437.186414905" watchObservedRunningTime="2026-01-27 12:50:09.962448856 +0000 UTC m=+1437.199477066"
Jan 27 12:50:10 crc kubenswrapper[4900]: I0127 12:50:10.826901 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused"
Jan 27 12:50:10 crc kubenswrapper[4900]: I0127 12:50:10.865652 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused"
Jan 27 12:50:10 crc kubenswrapper[4900]: I0127 12:50:10.898490 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1"
Jan 27 12:50:11 crc kubenswrapper[4900]: I0127 12:50:11.300394 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:50:13 crc kubenswrapper[4900]: I0127 12:50:13.987636 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3","Type":"ContainerStarted","Data":"628f2094942c6b5200ab077f7789031ee4bbc90197e69c96d20b886e90cd872c"}
Jan 27 12:50:14 crc kubenswrapper[4900]: I0127 12:50:14.022905 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Jan 27 12:50:17 crc kubenswrapper[4900]: I0127 12:50:17.295847 4900 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podf2794095-3d03-4cf0-8e7b-ecc39fb3db7a"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podf2794095-3d03-4cf0-8e7b-ecc39fb3db7a] : Timed out while waiting for systemd to remove kubepods-besteffort-podf2794095_3d03_4cf0_8e7b_ecc39fb3db7a.slice"
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.684544 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dzlzf"
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.836076 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-scripts\") pod \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") "
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.836437 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-swiftconf\") pod \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") "
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.836809 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-dispersionconf\") pod \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") "
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.837621 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-etc-swift\") pod \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") "
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.837664 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-ring-data-devices\") pod \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") "
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.837711 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-combined-ca-bundle\") pod \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") "
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.837771 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx2tc\" (UniqueName: \"kubernetes.io/projected/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-kube-api-access-hx2tc\") pod \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\" (UID: \"33ba5ed7-2f73-4fda-94cb-568e6a8c9843\") "
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.838420 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "33ba5ed7-2f73-4fda-94cb-568e6a8c9843" (UID: "33ba5ed7-2f73-4fda-94cb-568e6a8c9843"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.839505 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "33ba5ed7-2f73-4fda-94cb-568e6a8c9843" (UID: "33ba5ed7-2f73-4fda-94cb-568e6a8c9843"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.845687 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-kube-api-access-hx2tc" (OuterVolumeSpecName: "kube-api-access-hx2tc") pod "33ba5ed7-2f73-4fda-94cb-568e6a8c9843" (UID: "33ba5ed7-2f73-4fda-94cb-568e6a8c9843"). InnerVolumeSpecName "kube-api-access-hx2tc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.848975 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "33ba5ed7-2f73-4fda-94cb-568e6a8c9843" (UID: "33ba5ed7-2f73-4fda-94cb-568e6a8c9843"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.868338 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "33ba5ed7-2f73-4fda-94cb-568e6a8c9843" (UID: "33ba5ed7-2f73-4fda-94cb-568e6a8c9843"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.876183 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33ba5ed7-2f73-4fda-94cb-568e6a8c9843" (UID: "33ba5ed7-2f73-4fda-94cb-568e6a8c9843"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.880591 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-scripts" (OuterVolumeSpecName: "scripts") pod "33ba5ed7-2f73-4fda-94cb-568e6a8c9843" (UID: "33ba5ed7-2f73-4fda-94cb-568e6a8c9843"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.941479 4900 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.941527 4900 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.941537 4900 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.941547 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.941555 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx2tc\" (UniqueName: \"kubernetes.io/projected/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-kube-api-access-hx2tc\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.941566 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:19 crc kubenswrapper[4900]: I0127 12:50:19.941574 4900 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33ba5ed7-2f73-4fda-94cb-568e6a8c9843-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:20 crc kubenswrapper[4900]: I0127 12:50:20.081362 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dzlzf" event={"ID":"33ba5ed7-2f73-4fda-94cb-568e6a8c9843","Type":"ContainerDied","Data":"2b366d4b93c1021fd19a833a5b7bff21d79d4fbc41d8462f473f73ef40e768c7"}
Jan 27 12:50:20 crc kubenswrapper[4900]: I0127 12:50:20.081410 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b366d4b93c1021fd19a833a5b7bff21d79d4fbc41d8462f473f73ef40e768c7"
Jan 27 12:50:20 crc kubenswrapper[4900]: I0127 12:50:20.081416 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dzlzf"
Jan 27 12:50:20 crc kubenswrapper[4900]: I0127 12:50:20.829386 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 27 12:50:20 crc kubenswrapper[4900]: I0127 12:50:20.867730 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2"
Jan 27 12:50:21 crc kubenswrapper[4900]: I0127 12:50:21.092520 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5rc2l" event={"ID":"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4","Type":"ContainerStarted","Data":"194d7724541532f8815232c97f649e2362bb78b3d29d8f1d12b016971da45504"}
Jan 27 12:50:21 crc kubenswrapper[4900]: I0127 12:50:21.099331 4900 generic.go:334] "Generic (PLEG): container finished" podID="ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3" containerID="628f2094942c6b5200ab077f7789031ee4bbc90197e69c96d20b886e90cd872c" exitCode=0
Jan 27 12:50:21 crc kubenswrapper[4900]: I0127 12:50:21.099382 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3","Type":"ContainerDied","Data":"628f2094942c6b5200ab077f7789031ee4bbc90197e69c96d20b886e90cd872c"}
Jan 27 12:50:21 crc kubenswrapper[4900]: I0127 12:50:21.123808 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-5rc2l" podStartSLOduration=7.464050705 podStartE2EDuration="27.123779882s" podCreationTimestamp="2026-01-27 12:49:54 +0000 UTC" firstStartedPulling="2026-01-27 12:49:59.865798025 +0000 UTC m=+1427.102826235" lastFinishedPulling="2026-01-27 12:50:19.525527202 +0000 UTC m=+1446.762555412" observedRunningTime="2026-01-27 12:50:21.111767402 +0000 UTC m=+1448.348795612" watchObservedRunningTime="2026-01-27 12:50:21.123779882 +0000 UTC m=+1448.360808092"
Jan 27 12:50:22 crc kubenswrapper[4900]: I0127 12:50:22.111824 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3","Type":"ContainerStarted","Data":"7c78981945ea83545e81071a3271aacf183bcbe79f41f33500b3b041b7a752b0"}
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.485782 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-sfjk4"]
Jan 27 12:50:23 crc kubenswrapper[4900]: E0127 12:50:23.486816 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33ba5ed7-2f73-4fda-94cb-568e6a8c9843" containerName="swift-ring-rebalance"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.486838 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33ba5ed7-2f73-4fda-94cb-568e6a8c9843" containerName="swift-ring-rebalance"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.487152 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="33ba5ed7-2f73-4fda-94cb-568e6a8c9843" containerName="swift-ring-rebalance"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.488213 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sfjk4"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.506639 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sfjk4"]
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.612490 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-8xhbm"]
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.614513 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-8xhbm"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.631182 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc96b906-bcb7-451e-bab7-8b4c00058ed9-operator-scripts\") pod \"cinder-db-create-sfjk4\" (UID: \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\") " pod="openstack/cinder-db-create-sfjk4"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.631751 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llq5h\" (UniqueName: \"kubernetes.io/projected/dc96b906-bcb7-451e-bab7-8b4c00058ed9-kube-api-access-llq5h\") pod \"cinder-db-create-sfjk4\" (UID: \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\") " pod="openstack/cinder-db-create-sfjk4"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.643828 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-8xhbm"]
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.696133 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-c7be-account-create-update-wtq6h"]
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.698120 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c7be-account-create-update-wtq6h"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.704348 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.712095 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c7be-account-create-update-wtq6h"]
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.736823 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llq5h\" (UniqueName: \"kubernetes.io/projected/dc96b906-bcb7-451e-bab7-8b4c00058ed9-kube-api-access-llq5h\") pod \"cinder-db-create-sfjk4\" (UID: \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\") " pod="openstack/cinder-db-create-sfjk4"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.736927 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10a04b5b-42d9-488f-b5b3-21e86476918a-operator-scripts\") pod \"barbican-db-create-8xhbm\" (UID: \"10a04b5b-42d9-488f-b5b3-21e86476918a\") " pod="openstack/barbican-db-create-8xhbm"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.737048 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lf54x\" (UniqueName: \"kubernetes.io/projected/10a04b5b-42d9-488f-b5b3-21e86476918a-kube-api-access-lf54x\") pod \"barbican-db-create-8xhbm\" (UID: \"10a04b5b-42d9-488f-b5b3-21e86476918a\") " pod="openstack/barbican-db-create-8xhbm"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.737099 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc96b906-bcb7-451e-bab7-8b4c00058ed9-operator-scripts\") pod \"cinder-db-create-sfjk4\" (UID: \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\") " pod="openstack/cinder-db-create-sfjk4"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.737973 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc96b906-bcb7-451e-bab7-8b4c00058ed9-operator-scripts\") pod \"cinder-db-create-sfjk4\" (UID: \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\") " pod="openstack/cinder-db-create-sfjk4"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.820461 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llq5h\" (UniqueName: \"kubernetes.io/projected/dc96b906-bcb7-451e-bab7-8b4c00058ed9-kube-api-access-llq5h\") pod \"cinder-db-create-sfjk4\" (UID: \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\") " pod="openstack/cinder-db-create-sfjk4"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.839672 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10a04b5b-42d9-488f-b5b3-21e86476918a-operator-scripts\") pod \"barbican-db-create-8xhbm\" (UID: \"10a04b5b-42d9-488f-b5b3-21e86476918a\") " pod="openstack/barbican-db-create-8xhbm"
Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.839806 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6lcd\" (UniqueName: \"kubernetes.io/projected/6a470e2e-c53a-4c18-ae8f-236d3d410455-kube-api-access-m6lcd\") pod \"cinder-c7be-account-create-update-wtq6h\" (UID: \"6a470e2e-c53a-4c18-ae8f-236d3d410455\") "
pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.839835 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a470e2e-c53a-4c18-ae8f-236d3d410455-operator-scripts\") pod \"cinder-c7be-account-create-update-wtq6h\" (UID: \"6a470e2e-c53a-4c18-ae8f-236d3d410455\") " pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.839896 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lf54x\" (UniqueName: \"kubernetes.io/projected/10a04b5b-42d9-488f-b5b3-21e86476918a-kube-api-access-lf54x\") pod \"barbican-db-create-8xhbm\" (UID: \"10a04b5b-42d9-488f-b5b3-21e86476918a\") " pod="openstack/barbican-db-create-8xhbm" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.841039 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10a04b5b-42d9-488f-b5b3-21e86476918a-operator-scripts\") pod \"barbican-db-create-8xhbm\" (UID: \"10a04b5b-42d9-488f-b5b3-21e86476918a\") " pod="openstack/barbican-db-create-8xhbm" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.883935 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lf54x\" (UniqueName: \"kubernetes.io/projected/10a04b5b-42d9-488f-b5b3-21e86476918a-kube-api-access-lf54x\") pod \"barbican-db-create-8xhbm\" (UID: \"10a04b5b-42d9-488f-b5b3-21e86476918a\") " pod="openstack/barbican-db-create-8xhbm" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.900477 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-529rf"] Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.910552 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-6c48-account-create-update-nw88h"] Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.911989 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.915454 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-529rf" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.920544 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.923256 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-529rf"] Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.951800 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-6c48-account-create-update-nw88h"] Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.952481 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-8xhbm" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.956707 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6lcd\" (UniqueName: \"kubernetes.io/projected/6a470e2e-c53a-4c18-ae8f-236d3d410455-kube-api-access-m6lcd\") pod \"cinder-c7be-account-create-update-wtq6h\" (UID: \"6a470e2e-c53a-4c18-ae8f-236d3d410455\") " pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.956780 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a470e2e-c53a-4c18-ae8f-236d3d410455-operator-scripts\") pod \"cinder-c7be-account-create-update-wtq6h\" (UID: \"6a470e2e-c53a-4c18-ae8f-236d3d410455\") " pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.957977 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a470e2e-c53a-4c18-ae8f-236d3d410455-operator-scripts\") pod \"cinder-c7be-account-create-update-wtq6h\" (UID: \"6a470e2e-c53a-4c18-ae8f-236d3d410455\") " pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.967703 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-5eac-account-create-update-ggscp"] Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.969567 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.978380 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.983527 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-5eac-account-create-update-ggscp"] Jan 27 12:50:23 crc kubenswrapper[4900]: I0127 12:50:23.990514 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6lcd\" (UniqueName: \"kubernetes.io/projected/6a470e2e-c53a-4c18-ae8f-236d3d410455-kube-api-access-m6lcd\") pod \"cinder-c7be-account-create-update-wtq6h\" (UID: \"6a470e2e-c53a-4c18-ae8f-236d3d410455\") " pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.023649 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.063906 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kx2xr\" (UniqueName: \"kubernetes.io/projected/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-kube-api-access-kx2xr\") pod \"heat-6c48-account-create-update-nw88h\" (UID: \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\") " pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.064018 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-operator-scripts\") pod \"heat-6c48-account-create-update-nw88h\" (UID: \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\") " pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.064094 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4r2vt\" (UniqueName: \"kubernetes.io/projected/51b69b53-dd5e-43e9-99c0-83c33e8e2168-kube-api-access-4r2vt\") pod \"heat-db-create-529rf\" (UID: \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\") " pod="openstack/heat-db-create-529rf" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.064119 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cd54e89-bf17-4216-955a-08f26a918d67-operator-scripts\") pod \"barbican-5eac-account-create-update-ggscp\" (UID: \"6cd54e89-bf17-4216-955a-08f26a918d67\") " pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.064216 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m4n6\" (UniqueName: \"kubernetes.io/projected/6cd54e89-bf17-4216-955a-08f26a918d67-kube-api-access-7m4n6\") pod \"barbican-5eac-account-create-update-ggscp\" (UID: \"6cd54e89-bf17-4216-955a-08f26a918d67\") " pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.064257 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b69b53-dd5e-43e9-99c0-83c33e8e2168-operator-scripts\") pod \"heat-db-create-529rf\" (UID: \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\") " pod="openstack/heat-db-create-529rf" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.085595 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-cp5z4"] Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.089190 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.093611 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.093983 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.101549 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-cp5z4"] Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.104485 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k2fbf" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.104750 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.112880 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-qkl46"] Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.112957 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sfjk4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.115087 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.126542 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-3073-account-create-update-wvxth"] Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.131246 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.133837 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.156301 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-qkl46"] Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.172954 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kx2xr\" (UniqueName: \"kubernetes.io/projected/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-kube-api-access-kx2xr\") pod \"heat-6c48-account-create-update-nw88h\" (UID: \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\") " pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.173078 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-operator-scripts\") pod \"heat-6c48-account-create-update-nw88h\" (UID: \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\") " pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.173131 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4r2vt\" (UniqueName: \"kubernetes.io/projected/51b69b53-dd5e-43e9-99c0-83c33e8e2168-kube-api-access-4r2vt\") pod \"heat-db-create-529rf\" (UID: \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\") " pod="openstack/heat-db-create-529rf" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.173156 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/6cd54e89-bf17-4216-955a-08f26a918d67-operator-scripts\") pod \"barbican-5eac-account-create-update-ggscp\" (UID: \"6cd54e89-bf17-4216-955a-08f26a918d67\") " pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.173197 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-config-data\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.173294 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m4n6\" (UniqueName: \"kubernetes.io/projected/6cd54e89-bf17-4216-955a-08f26a918d67-kube-api-access-7m4n6\") pod \"barbican-5eac-account-create-update-ggscp\" (UID: \"6cd54e89-bf17-4216-955a-08f26a918d67\") " pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.173339 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b69b53-dd5e-43e9-99c0-83c33e8e2168-operator-scripts\") pod \"heat-db-create-529rf\" (UID: \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\") " pod="openstack/heat-db-create-529rf" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.173373 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q9kq\" (UniqueName: \"kubernetes.io/projected/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-kube-api-access-7q9kq\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.173419 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-combined-ca-bundle\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.181362 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3073-account-create-update-wvxth"] Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.193935 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-operator-scripts\") pod \"heat-6c48-account-create-update-nw88h\" (UID: \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\") " pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.199836 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cd54e89-bf17-4216-955a-08f26a918d67-operator-scripts\") pod \"barbican-5eac-account-create-update-ggscp\" (UID: \"6cd54e89-bf17-4216-955a-08f26a918d67\") " pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.202071 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b69b53-dd5e-43e9-99c0-83c33e8e2168-operator-scripts\") pod \"heat-db-create-529rf\" (UID: 
\"51b69b53-dd5e-43e9-99c0-83c33e8e2168\") " pod="openstack/heat-db-create-529rf" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.279375 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4r2vt\" (UniqueName: \"kubernetes.io/projected/51b69b53-dd5e-43e9-99c0-83c33e8e2168-kube-api-access-4r2vt\") pod \"heat-db-create-529rf\" (UID: \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\") " pod="openstack/heat-db-create-529rf" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.280927 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q9kq\" (UniqueName: \"kubernetes.io/projected/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-kube-api-access-7q9kq\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.280974 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-combined-ca-bundle\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.280998 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7n2kg\" (UniqueName: \"kubernetes.io/projected/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-kube-api-access-7n2kg\") pod \"neutron-db-create-qkl46\" (UID: \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\") " pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.281073 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-operator-scripts\") pod \"neutron-db-create-qkl46\" (UID: \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\") " pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.281104 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz5p7\" (UniqueName: \"kubernetes.io/projected/83e3ec3d-0683-4f57-b926-fbbeb94957a8-kube-api-access-qz5p7\") pod \"neutron-3073-account-create-update-wvxth\" (UID: \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\") " pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.281146 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83e3ec3d-0683-4f57-b926-fbbeb94957a8-operator-scripts\") pod \"neutron-3073-account-create-update-wvxth\" (UID: \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\") " pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.281214 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-config-data\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.422708 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7n2kg\" (UniqueName: 
\"kubernetes.io/projected/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-kube-api-access-7n2kg\") pod \"neutron-db-create-qkl46\" (UID: \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\") " pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.422806 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-operator-scripts\") pod \"neutron-db-create-qkl46\" (UID: \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\") " pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.422838 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz5p7\" (UniqueName: \"kubernetes.io/projected/83e3ec3d-0683-4f57-b926-fbbeb94957a8-kube-api-access-qz5p7\") pod \"neutron-3073-account-create-update-wvxth\" (UID: \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\") " pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.422881 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83e3ec3d-0683-4f57-b926-fbbeb94957a8-operator-scripts\") pod \"neutron-3073-account-create-update-wvxth\" (UID: \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\") " pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.424189 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83e3ec3d-0683-4f57-b926-fbbeb94957a8-operator-scripts\") pod \"neutron-3073-account-create-update-wvxth\" (UID: \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\") " pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.425008 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-operator-scripts\") pod \"neutron-db-create-qkl46\" (UID: \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\") " pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.498569 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kx2xr\" (UniqueName: \"kubernetes.io/projected/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-kube-api-access-kx2xr\") pod \"heat-6c48-account-create-update-nw88h\" (UID: \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\") " pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.502593 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m4n6\" (UniqueName: \"kubernetes.io/projected/6cd54e89-bf17-4216-955a-08f26a918d67-kube-api-access-7m4n6\") pod \"barbican-5eac-account-create-update-ggscp\" (UID: \"6cd54e89-bf17-4216-955a-08f26a918d67\") " pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.502694 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7n2kg\" (UniqueName: \"kubernetes.io/projected/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-kube-api-access-7n2kg\") pod \"neutron-db-create-qkl46\" (UID: \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\") " pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.503271 4900 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-combined-ca-bundle\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.503926 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-config-data\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.508868 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz5p7\" (UniqueName: \"kubernetes.io/projected/83e3ec3d-0683-4f57-b926-fbbeb94957a8-kube-api-access-qz5p7\") pod \"neutron-3073-account-create-update-wvxth\" (UID: \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\") " pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.530641 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q9kq\" (UniqueName: \"kubernetes.io/projected/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-kube-api-access-7q9kq\") pod \"keystone-db-sync-cp5z4\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.737936 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.764925 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-529rf" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.765412 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.776664 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.793206 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.926531 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:24 crc kubenswrapper[4900]: I0127 12:50:24.933798 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c7be-account-create-update-wtq6h"] Jan 27 12:50:25 crc kubenswrapper[4900]: I0127 12:50:25.050094 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-8xhbm"] Jan 27 12:50:25 crc kubenswrapper[4900]: W0127 12:50:25.139554 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10a04b5b_42d9_488f_b5b3_21e86476918a.slice/crio-55037096f7895d4f872232feb0f56f6ac70c2b4975f8ce58f3d79034a0147e49 WatchSource:0}: Error finding container 55037096f7895d4f872232feb0f56f6ac70c2b4975f8ce58f3d79034a0147e49: Status 404 returned error can't find the container with id 55037096f7895d4f872232feb0f56f6ac70c2b4975f8ce58f3d79034a0147e49 Jan 27 12:50:25 crc kubenswrapper[4900]: I0127 12:50:25.220513 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-8xhbm" event={"ID":"10a04b5b-42d9-488f-b5b3-21e86476918a","Type":"ContainerStarted","Data":"55037096f7895d4f872232feb0f56f6ac70c2b4975f8ce58f3d79034a0147e49"} Jan 27 12:50:25 crc kubenswrapper[4900]: I0127 12:50:25.233682 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sfjk4"] Jan 27 12:50:25 crc kubenswrapper[4900]: I0127 12:50:25.234699 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3","Type":"ContainerStarted","Data":"323e0ce3ef73c62b6fdac4ce0f5fca0394ba874c1073988404cc6411856535d7"} Jan 27 12:50:25 crc kubenswrapper[4900]: I0127 12:50:25.238458 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c7be-account-create-update-wtq6h" event={"ID":"6a470e2e-c53a-4c18-ae8f-236d3d410455","Type":"ContainerStarted","Data":"351a4b50da1a7a3116833b222cc199aa2e98acf2df775f48a258b0c9e69d493b"} Jan 27 12:50:25 crc kubenswrapper[4900]: I0127 12:50:25.473213 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-5eac-account-create-update-ggscp"] Jan 27 12:50:25 crc kubenswrapper[4900]: W0127 12:50:25.515869 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd54e89_bf17_4216_955a_08f26a918d67.slice/crio-2eae2e660d16fd873ab6a9155a8671dc1630b684d374fb5a6e62f19199611b68 WatchSource:0}: Error finding container 2eae2e660d16fd873ab6a9155a8671dc1630b684d374fb5a6e62f19199611b68: Status 404 returned error can't find the container with id 2eae2e660d16fd873ab6a9155a8671dc1630b684d374fb5a6e62f19199611b68 Jan 27 12:50:25 crc kubenswrapper[4900]: I0127 12:50:25.646887 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-6c48-account-create-update-nw88h"] Jan 27 12:50:26 crc kubenswrapper[4900]: W0127 12:50:26.043516 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf9e2ef8_0e37_438f_b2bd_c1c050c4064b.slice/crio-25303ee8a941aa36e1f2f7725f74e0a0501693758ef612ae7342240ba7c5fa36 WatchSource:0}: Error finding container 25303ee8a941aa36e1f2f7725f74e0a0501693758ef612ae7342240ba7c5fa36: Status 404 returned error can't find the container with id 25303ee8a941aa36e1f2f7725f74e0a0501693758ef612ae7342240ba7c5fa36 Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 
12:50:26.046860 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-cp5z4"] Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.064505 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-qkl46"] Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.088608 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-529rf"] Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.106822 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3073-account-create-update-wvxth"] Jan 27 12:50:26 crc kubenswrapper[4900]: W0127 12:50:26.112375 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51b69b53_dd5e_43e9_99c0_83c33e8e2168.slice/crio-e3ee61596af5ccea08ff1c54860397a84c4cd17d3ab043bf475051206ad4fff8 WatchSource:0}: Error finding container e3ee61596af5ccea08ff1c54860397a84c4cd17d3ab043bf475051206ad4fff8: Status 404 returned error can't find the container with id e3ee61596af5ccea08ff1c54860397a84c4cd17d3ab043bf475051206ad4fff8 Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.270340 4900 generic.go:334] "Generic (PLEG): container finished" podID="6a470e2e-c53a-4c18-ae8f-236d3d410455" containerID="66cd4b87dbcfbd189d10c0564ac6e1988a9f728e9b390784896b00721150a212" exitCode=0 Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.270417 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c7be-account-create-update-wtq6h" event={"ID":"6a470e2e-c53a-4c18-ae8f-236d3d410455","Type":"ContainerDied","Data":"66cd4b87dbcfbd189d10c0564ac6e1988a9f728e9b390784896b00721150a212"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.275690 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-8xhbm" event={"ID":"10a04b5b-42d9-488f-b5b3-21e86476918a","Type":"ContainerStarted","Data":"d88d6a7adfbc0aeb34e6d996817676771fe1bb48c853ece486365197f4bab755"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.278304 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qkl46" event={"ID":"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e","Type":"ContainerStarted","Data":"b1bf372df2114651b7d1b90630f2e8c6d369c478cd30bf0b15a74e4b2b9ed6bd"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.285431 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3","Type":"ContainerStarted","Data":"2b2c5fc163b9013bf7b3e9de7d65cf63b2a0917fd600cea3f2b6d3be41f17be7"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.289882 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-529rf" event={"ID":"51b69b53-dd5e-43e9-99c0-83c33e8e2168","Type":"ContainerStarted","Data":"e3ee61596af5ccea08ff1c54860397a84c4cd17d3ab043bf475051206ad4fff8"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.301472 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-6c48-account-create-update-nw88h" event={"ID":"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1","Type":"ContainerStarted","Data":"13889eab61a640be1b81555b976028b81125604a3d51dfc72ef84d8214c3d80e"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.301540 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-6c48-account-create-update-nw88h" 
event={"ID":"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1","Type":"ContainerStarted","Data":"2331862fe676891771a3856e849f783db97eff7e7d75a02f570d8b922f61467a"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.303408 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cp5z4" event={"ID":"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b","Type":"ContainerStarted","Data":"25303ee8a941aa36e1f2f7725f74e0a0501693758ef612ae7342240ba7c5fa36"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.304599 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3073-account-create-update-wvxth" event={"ID":"83e3ec3d-0683-4f57-b926-fbbeb94957a8","Type":"ContainerStarted","Data":"4e77d5e2effdf42634f695d4bbf508b4f03227fdff14b068a4648d56b9cf78d8"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.307992 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5eac-account-create-update-ggscp" event={"ID":"6cd54e89-bf17-4216-955a-08f26a918d67","Type":"ContainerStarted","Data":"0056d78e57274c7e0851e0b759847b1981314cfd2e17ab468966641560115762"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.308090 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5eac-account-create-update-ggscp" event={"ID":"6cd54e89-bf17-4216-955a-08f26a918d67","Type":"ContainerStarted","Data":"2eae2e660d16fd873ab6a9155a8671dc1630b684d374fb5a6e62f19199611b68"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.314223 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sfjk4" event={"ID":"dc96b906-bcb7-451e-bab7-8b4c00058ed9","Type":"ContainerStarted","Data":"bc62fef879308289b145fb901cc7256ca0e2f2847a617b712c5e76f1ce39d8e4"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.314310 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sfjk4" event={"ID":"dc96b906-bcb7-451e-bab7-8b4c00058ed9","Type":"ContainerStarted","Data":"9cc2c22b0b775f8001befb6e67d1eb482751792f6c5834fbe18164f1e08f8ba3"} Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.326620 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-8xhbm" podStartSLOduration=3.326593371 podStartE2EDuration="3.326593371s" podCreationTimestamp="2026-01-27 12:50:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:26.314003414 +0000 UTC m=+1453.551031624" watchObservedRunningTime="2026-01-27 12:50:26.326593371 +0000 UTC m=+1453.563621581" Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.358523 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=19.358499972 podStartE2EDuration="19.358499972s" podCreationTimestamp="2026-01-27 12:50:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:26.341362272 +0000 UTC m=+1453.578390492" watchObservedRunningTime="2026-01-27 12:50:26.358499972 +0000 UTC m=+1453.595528182" Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.396773 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-5eac-account-create-update-ggscp" podStartSLOduration=3.396751187 podStartE2EDuration="3.396751187s" podCreationTimestamp="2026-01-27 12:50:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:26.378221706 +0000 UTC m=+1453.615249946" watchObservedRunningTime="2026-01-27 12:50:26.396751187 +0000 UTC m=+1453.633779397" Jan 27 12:50:26 crc kubenswrapper[4900]: I0127 12:50:26.405678 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-6c48-account-create-update-nw88h" podStartSLOduration=3.405661446 podStartE2EDuration="3.405661446s" podCreationTimestamp="2026-01-27 12:50:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:26.392488982 +0000 UTC m=+1453.629517212" watchObservedRunningTime="2026-01-27 12:50:26.405661446 +0000 UTC m=+1453.642689646" Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.327740 4900 generic.go:334] "Generic (PLEG): container finished" podID="10a04b5b-42d9-488f-b5b3-21e86476918a" containerID="d88d6a7adfbc0aeb34e6d996817676771fe1bb48c853ece486365197f4bab755" exitCode=0 Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.328026 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-8xhbm" event={"ID":"10a04b5b-42d9-488f-b5b3-21e86476918a","Type":"ContainerDied","Data":"d88d6a7adfbc0aeb34e6d996817676771fe1bb48c853ece486365197f4bab755"} Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.332252 4900 generic.go:334] "Generic (PLEG): container finished" podID="2e77e1e2-114e-44d3-8fc0-5c3d09e9179e" containerID="fd673af18ccdf9f88a43ee5a7d8f4906e58279b428d93efb0404f39e168bcfcd" exitCode=0 Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.332345 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qkl46" event={"ID":"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e","Type":"ContainerDied","Data":"fd673af18ccdf9f88a43ee5a7d8f4906e58279b428d93efb0404f39e168bcfcd"} Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.334429 4900 generic.go:334] "Generic (PLEG): container finished" podID="83e3ec3d-0683-4f57-b926-fbbeb94957a8" containerID="64398728ab6d462ef88ef1539188265db676a604efdf20888df073cc53a5ad60" exitCode=0 Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.334506 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3073-account-create-update-wvxth" event={"ID":"83e3ec3d-0683-4f57-b926-fbbeb94957a8","Type":"ContainerDied","Data":"64398728ab6d462ef88ef1539188265db676a604efdf20888df073cc53a5ad60"} Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.336495 4900 generic.go:334] "Generic (PLEG): container finished" podID="6cd54e89-bf17-4216-955a-08f26a918d67" containerID="0056d78e57274c7e0851e0b759847b1981314cfd2e17ab468966641560115762" exitCode=0 Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.336571 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5eac-account-create-update-ggscp" event={"ID":"6cd54e89-bf17-4216-955a-08f26a918d67","Type":"ContainerDied","Data":"0056d78e57274c7e0851e0b759847b1981314cfd2e17ab468966641560115762"} Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.338165 4900 generic.go:334] "Generic (PLEG): container finished" podID="dc96b906-bcb7-451e-bab7-8b4c00058ed9" containerID="bc62fef879308289b145fb901cc7256ca0e2f2847a617b712c5e76f1ce39d8e4" exitCode=0 Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.338222 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sfjk4" 
event={"ID":"dc96b906-bcb7-451e-bab7-8b4c00058ed9","Type":"ContainerDied","Data":"bc62fef879308289b145fb901cc7256ca0e2f2847a617b712c5e76f1ce39d8e4"} Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.348546 4900 generic.go:334] "Generic (PLEG): container finished" podID="51b69b53-dd5e-43e9-99c0-83c33e8e2168" containerID="336b4567761cf6502313a127525bcce7e632a8eb755abd50086fc60bf5e4d619" exitCode=0 Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.348636 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-529rf" event={"ID":"51b69b53-dd5e-43e9-99c0-83c33e8e2168","Type":"ContainerDied","Data":"336b4567761cf6502313a127525bcce7e632a8eb755abd50086fc60bf5e4d619"} Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.384807 4900 generic.go:334] "Generic (PLEG): container finished" podID="fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1" containerID="13889eab61a640be1b81555b976028b81125604a3d51dfc72ef84d8214c3d80e" exitCode=0 Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.386200 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-6c48-account-create-update-nw88h" event={"ID":"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1","Type":"ContainerDied","Data":"13889eab61a640be1b81555b976028b81125604a3d51dfc72ef84d8214c3d80e"} Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.970276 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sfjk4" Jan 27 12:50:27 crc kubenswrapper[4900]: I0127 12:50:27.974046 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.038402 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6lcd\" (UniqueName: \"kubernetes.io/projected/6a470e2e-c53a-4c18-ae8f-236d3d410455-kube-api-access-m6lcd\") pod \"6a470e2e-c53a-4c18-ae8f-236d3d410455\" (UID: \"6a470e2e-c53a-4c18-ae8f-236d3d410455\") " Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.038724 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc96b906-bcb7-451e-bab7-8b4c00058ed9-operator-scripts\") pod \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\" (UID: \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\") " Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.038868 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a470e2e-c53a-4c18-ae8f-236d3d410455-operator-scripts\") pod \"6a470e2e-c53a-4c18-ae8f-236d3d410455\" (UID: \"6a470e2e-c53a-4c18-ae8f-236d3d410455\") " Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.038933 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llq5h\" (UniqueName: \"kubernetes.io/projected/dc96b906-bcb7-451e-bab7-8b4c00058ed9-kube-api-access-llq5h\") pod \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\" (UID: \"dc96b906-bcb7-451e-bab7-8b4c00058ed9\") " Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.039774 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc96b906-bcb7-451e-bab7-8b4c00058ed9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dc96b906-bcb7-451e-bab7-8b4c00058ed9" (UID: "dc96b906-bcb7-451e-bab7-8b4c00058ed9"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.040090 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a470e2e-c53a-4c18-ae8f-236d3d410455-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6a470e2e-c53a-4c18-ae8f-236d3d410455" (UID: "6a470e2e-c53a-4c18-ae8f-236d3d410455"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.060273 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a470e2e-c53a-4c18-ae8f-236d3d410455-kube-api-access-m6lcd" (OuterVolumeSpecName: "kube-api-access-m6lcd") pod "6a470e2e-c53a-4c18-ae8f-236d3d410455" (UID: "6a470e2e-c53a-4c18-ae8f-236d3d410455"). InnerVolumeSpecName "kube-api-access-m6lcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.063371 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc96b906-bcb7-451e-bab7-8b4c00058ed9-kube-api-access-llq5h" (OuterVolumeSpecName: "kube-api-access-llq5h") pod "dc96b906-bcb7-451e-bab7-8b4c00058ed9" (UID: "dc96b906-bcb7-451e-bab7-8b4c00058ed9"). InnerVolumeSpecName "kube-api-access-llq5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.143365 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a470e2e-c53a-4c18-ae8f-236d3d410455-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.143423 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llq5h\" (UniqueName: \"kubernetes.io/projected/dc96b906-bcb7-451e-bab7-8b4c00058ed9-kube-api-access-llq5h\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.143439 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6lcd\" (UniqueName: \"kubernetes.io/projected/6a470e2e-c53a-4c18-ae8f-236d3d410455-kube-api-access-m6lcd\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.143450 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc96b906-bcb7-451e-bab7-8b4c00058ed9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.389619 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.417921 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sfjk4" event={"ID":"dc96b906-bcb7-451e-bab7-8b4c00058ed9","Type":"ContainerDied","Data":"9cc2c22b0b775f8001befb6e67d1eb482751792f6c5834fbe18164f1e08f8ba3"} Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.417995 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9cc2c22b0b775f8001befb6e67d1eb482751792f6c5834fbe18164f1e08f8ba3" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.418139 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-sfjk4" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.422401 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c7be-account-create-update-wtq6h" event={"ID":"6a470e2e-c53a-4c18-ae8f-236d3d410455","Type":"ContainerDied","Data":"351a4b50da1a7a3116833b222cc199aa2e98acf2df775f48a258b0c9e69d493b"} Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.422456 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="351a4b50da1a7a3116833b222cc199aa2e98acf2df775f48a258b0c9e69d493b" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.422635 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c7be-account-create-update-wtq6h" Jan 27 12:50:28 crc kubenswrapper[4900]: I0127 12:50:28.960547 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.063813 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m4n6\" (UniqueName: \"kubernetes.io/projected/6cd54e89-bf17-4216-955a-08f26a918d67-kube-api-access-7m4n6\") pod \"6cd54e89-bf17-4216-955a-08f26a918d67\" (UID: \"6cd54e89-bf17-4216-955a-08f26a918d67\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.064075 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cd54e89-bf17-4216-955a-08f26a918d67-operator-scripts\") pod \"6cd54e89-bf17-4216-955a-08f26a918d67\" (UID: \"6cd54e89-bf17-4216-955a-08f26a918d67\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.065650 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cd54e89-bf17-4216-955a-08f26a918d67-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6cd54e89-bf17-4216-955a-08f26a918d67" (UID: "6cd54e89-bf17-4216-955a-08f26a918d67"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.087649 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cd54e89-bf17-4216-955a-08f26a918d67-kube-api-access-7m4n6" (OuterVolumeSpecName: "kube-api-access-7m4n6") pod "6cd54e89-bf17-4216-955a-08f26a918d67" (UID: "6cd54e89-bf17-4216-955a-08f26a918d67"). InnerVolumeSpecName "kube-api-access-7m4n6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.167937 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7m4n6\" (UniqueName: \"kubernetes.io/projected/6cd54e89-bf17-4216-955a-08f26a918d67-kube-api-access-7m4n6\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.167973 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cd54e89-bf17-4216-955a-08f26a918d67-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.276439 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.279785 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-8xhbm" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.300023 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.303807 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-529rf" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.320900 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.437226 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-529rf" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.437504 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-529rf" event={"ID":"51b69b53-dd5e-43e9-99c0-83c33e8e2168","Type":"ContainerDied","Data":"e3ee61596af5ccea08ff1c54860397a84c4cd17d3ab043bf475051206ad4fff8"} Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.437530 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3ee61596af5ccea08ff1c54860397a84c4cd17d3ab043bf475051206ad4fff8" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.441133 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-6c48-account-create-update-nw88h" event={"ID":"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1","Type":"ContainerDied","Data":"2331862fe676891771a3856e849f783db97eff7e7d75a02f570d8b922f61467a"} Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.441192 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2331862fe676891771a3856e849f783db97eff7e7d75a02f570d8b922f61467a" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.441154 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-6c48-account-create-update-nw88h" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.444384 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-8xhbm" event={"ID":"10a04b5b-42d9-488f-b5b3-21e86476918a","Type":"ContainerDied","Data":"55037096f7895d4f872232feb0f56f6ac70c2b4975f8ce58f3d79034a0147e49"} Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.444988 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55037096f7895d4f872232feb0f56f6ac70c2b4975f8ce58f3d79034a0147e49" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.444668 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-8xhbm" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.446840 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qkl46" event={"ID":"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e","Type":"ContainerDied","Data":"b1bf372df2114651b7d1b90630f2e8c6d369c478cd30bf0b15a74e4b2b9ed6bd"} Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.446872 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1bf372df2114651b7d1b90630f2e8c6d369c478cd30bf0b15a74e4b2b9ed6bd" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.446941 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-qkl46" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.449458 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3073-account-create-update-wvxth" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.449479 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3073-account-create-update-wvxth" event={"ID":"83e3ec3d-0683-4f57-b926-fbbeb94957a8","Type":"ContainerDied","Data":"4e77d5e2effdf42634f695d4bbf508b4f03227fdff14b068a4648d56b9cf78d8"} Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.449517 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e77d5e2effdf42634f695d4bbf508b4f03227fdff14b068a4648d56b9cf78d8" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.451574 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5eac-account-create-update-ggscp" event={"ID":"6cd54e89-bf17-4216-955a-08f26a918d67","Type":"ContainerDied","Data":"2eae2e660d16fd873ab6a9155a8671dc1630b684d374fb5a6e62f19199611b68"} Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.451601 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2eae2e660d16fd873ab6a9155a8671dc1630b684d374fb5a6e62f19199611b68" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.451636 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5eac-account-create-update-ggscp" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475256 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7n2kg\" (UniqueName: \"kubernetes.io/projected/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-kube-api-access-7n2kg\") pod \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\" (UID: \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475314 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lf54x\" (UniqueName: \"kubernetes.io/projected/10a04b5b-42d9-488f-b5b3-21e86476918a-kube-api-access-lf54x\") pod \"10a04b5b-42d9-488f-b5b3-21e86476918a\" (UID: \"10a04b5b-42d9-488f-b5b3-21e86476918a\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475432 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83e3ec3d-0683-4f57-b926-fbbeb94957a8-operator-scripts\") pod \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\" (UID: \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475460 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10a04b5b-42d9-488f-b5b3-21e86476918a-operator-scripts\") pod \"10a04b5b-42d9-488f-b5b3-21e86476918a\" (UID: \"10a04b5b-42d9-488f-b5b3-21e86476918a\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475510 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-operator-scripts\") pod \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\" (UID: \"2e77e1e2-114e-44d3-8fc0-5c3d09e9179e\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475530 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4r2vt\" (UniqueName: 
\"kubernetes.io/projected/51b69b53-dd5e-43e9-99c0-83c33e8e2168-kube-api-access-4r2vt\") pod \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\" (UID: \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475580 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kx2xr\" (UniqueName: \"kubernetes.io/projected/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-kube-api-access-kx2xr\") pod \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\" (UID: \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475630 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-operator-scripts\") pod \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\" (UID: \"fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475656 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz5p7\" (UniqueName: \"kubernetes.io/projected/83e3ec3d-0683-4f57-b926-fbbeb94957a8-kube-api-access-qz5p7\") pod \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\" (UID: \"83e3ec3d-0683-4f57-b926-fbbeb94957a8\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.475684 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b69b53-dd5e-43e9-99c0-83c33e8e2168-operator-scripts\") pod \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\" (UID: \"51b69b53-dd5e-43e9-99c0-83c33e8e2168\") " Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.479155 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51b69b53-dd5e-43e9-99c0-83c33e8e2168-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "51b69b53-dd5e-43e9-99c0-83c33e8e2168" (UID: "51b69b53-dd5e-43e9-99c0-83c33e8e2168"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.480196 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10a04b5b-42d9-488f-b5b3-21e86476918a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "10a04b5b-42d9-488f-b5b3-21e86476918a" (UID: "10a04b5b-42d9-488f-b5b3-21e86476918a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.481701 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e77e1e2-114e-44d3-8fc0-5c3d09e9179e" (UID: "2e77e1e2-114e-44d3-8fc0-5c3d09e9179e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.482451 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83e3ec3d-0683-4f57-b926-fbbeb94957a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "83e3ec3d-0683-4f57-b926-fbbeb94957a8" (UID: "83e3ec3d-0683-4f57-b926-fbbeb94957a8"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.486013 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1" (UID: "fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.496764 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10a04b5b-42d9-488f-b5b3-21e86476918a-kube-api-access-lf54x" (OuterVolumeSpecName: "kube-api-access-lf54x") pod "10a04b5b-42d9-488f-b5b3-21e86476918a" (UID: "10a04b5b-42d9-488f-b5b3-21e86476918a"). InnerVolumeSpecName "kube-api-access-lf54x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.509399 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-kube-api-access-7n2kg" (OuterVolumeSpecName: "kube-api-access-7n2kg") pod "2e77e1e2-114e-44d3-8fc0-5c3d09e9179e" (UID: "2e77e1e2-114e-44d3-8fc0-5c3d09e9179e"). InnerVolumeSpecName "kube-api-access-7n2kg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.509920 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83e3ec3d-0683-4f57-b926-fbbeb94957a8-kube-api-access-qz5p7" (OuterVolumeSpecName: "kube-api-access-qz5p7") pod "83e3ec3d-0683-4f57-b926-fbbeb94957a8" (UID: "83e3ec3d-0683-4f57-b926-fbbeb94957a8"). InnerVolumeSpecName "kube-api-access-qz5p7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.509999 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-kube-api-access-kx2xr" (OuterVolumeSpecName: "kube-api-access-kx2xr") pod "fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1" (UID: "fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1"). InnerVolumeSpecName "kube-api-access-kx2xr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.515293 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51b69b53-dd5e-43e9-99c0-83c33e8e2168-kube-api-access-4r2vt" (OuterVolumeSpecName: "kube-api-access-4r2vt") pod "51b69b53-dd5e-43e9-99c0-83c33e8e2168" (UID: "51b69b53-dd5e-43e9-99c0-83c33e8e2168"). InnerVolumeSpecName "kube-api-access-4r2vt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580330 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580367 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz5p7\" (UniqueName: \"kubernetes.io/projected/83e3ec3d-0683-4f57-b926-fbbeb94957a8-kube-api-access-qz5p7\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580378 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b69b53-dd5e-43e9-99c0-83c33e8e2168-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580426 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7n2kg\" (UniqueName: \"kubernetes.io/projected/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-kube-api-access-7n2kg\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580439 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lf54x\" (UniqueName: \"kubernetes.io/projected/10a04b5b-42d9-488f-b5b3-21e86476918a-kube-api-access-lf54x\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580450 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83e3ec3d-0683-4f57-b926-fbbeb94957a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580459 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10a04b5b-42d9-488f-b5b3-21e86476918a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580469 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580480 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4r2vt\" (UniqueName: \"kubernetes.io/projected/51b69b53-dd5e-43e9-99c0-83c33e8e2168-kube-api-access-4r2vt\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:29 crc kubenswrapper[4900]: I0127 12:50:29.580487 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kx2xr\" (UniqueName: \"kubernetes.io/projected/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1-kube-api-access-kx2xr\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:31 crc kubenswrapper[4900]: I0127 12:50:31.483861 4900 generic.go:334] "Generic (PLEG): container finished" podID="1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" containerID="194d7724541532f8815232c97f649e2362bb78b3d29d8f1d12b016971da45504" exitCode=0 Jan 27 12:50:31 crc kubenswrapper[4900]: I0127 12:50:31.483963 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5rc2l" event={"ID":"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4","Type":"ContainerDied","Data":"194d7724541532f8815232c97f649e2362bb78b3d29d8f1d12b016971da45504"} Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.088790 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5rc2l" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.277882 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-config-data\") pod \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.277933 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cncb\" (UniqueName: \"kubernetes.io/projected/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-kube-api-access-5cncb\") pod \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.277966 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-combined-ca-bundle\") pod \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.278070 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-db-sync-config-data\") pod \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\" (UID: \"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4\") " Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.283207 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" (UID: "1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.283281 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-kube-api-access-5cncb" (OuterVolumeSpecName: "kube-api-access-5cncb") pod "1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" (UID: "1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4"). InnerVolumeSpecName "kube-api-access-5cncb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.314689 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" (UID: "1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.341336 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-config-data" (OuterVolumeSpecName: "config-data") pod "1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" (UID: "1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.381324 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.381371 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cncb\" (UniqueName: \"kubernetes.io/projected/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-kube-api-access-5cncb\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.381386 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.381400 4900 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.514822 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5rc2l" event={"ID":"1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4","Type":"ContainerDied","Data":"1a690051342ea055ee2054665a6f8d7ddfc6985242f5849445c91becdcd971ef"} Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.514864 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a690051342ea055ee2054665a6f8d7ddfc6985242f5849445c91becdcd971ef" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.514923 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5rc2l" Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.518996 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cp5z4" event={"ID":"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b","Type":"ContainerStarted","Data":"575c3ce2fc52aa5ad7d9c254701fe8fd0ecb1b1cffc53e229d74a5d284fcfcec"} Jan 27 12:50:33 crc kubenswrapper[4900]: I0127 12:50:33.550564 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-cp5z4" podStartSLOduration=3.020926872 podStartE2EDuration="9.550538609s" podCreationTimestamp="2026-01-27 12:50:24 +0000 UTC" firstStartedPulling="2026-01-27 12:50:26.059780543 +0000 UTC m=+1453.296808753" lastFinishedPulling="2026-01-27 12:50:32.58939228 +0000 UTC m=+1459.826420490" observedRunningTime="2026-01-27 12:50:33.540855387 +0000 UTC m=+1460.777883587" watchObservedRunningTime="2026-01-27 12:50:33.550538609 +0000 UTC m=+1460.787566819" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.178203 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-2df2t"] Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179150 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b69b53-dd5e-43e9-99c0-83c33e8e2168" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179167 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b69b53-dd5e-43e9-99c0-83c33e8e2168" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179186 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cd54e89-bf17-4216-955a-08f26a918d67" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179192 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cd54e89-bf17-4216-955a-08f26a918d67" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179200 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179206 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179226 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10a04b5b-42d9-488f-b5b3-21e86476918a" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179232 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="10a04b5b-42d9-488f-b5b3-21e86476918a" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179243 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e3ec3d-0683-4f57-b926-fbbeb94957a8" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179248 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e3ec3d-0683-4f57-b926-fbbeb94957a8" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179263 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e77e1e2-114e-44d3-8fc0-5c3d09e9179e" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179268 4900 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="2e77e1e2-114e-44d3-8fc0-5c3d09e9179e" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179277 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a470e2e-c53a-4c18-ae8f-236d3d410455" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179284 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a470e2e-c53a-4c18-ae8f-236d3d410455" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179292 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc96b906-bcb7-451e-bab7-8b4c00058ed9" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179298 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc96b906-bcb7-451e-bab7-8b4c00058ed9" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: E0127 12:50:34.179313 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" containerName="glance-db-sync" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179319 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" containerName="glance-db-sync" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179539 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e3ec3d-0683-4f57-b926-fbbeb94957a8" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179566 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="10a04b5b-42d9-488f-b5b3-21e86476918a" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179576 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e77e1e2-114e-44d3-8fc0-5c3d09e9179e" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179590 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" containerName="glance-db-sync" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179604 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a470e2e-c53a-4c18-ae8f-236d3d410455" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179613 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cd54e89-bf17-4216-955a-08f26a918d67" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179625 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="51b69b53-dd5e-43e9-99c0-83c33e8e2168" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179633 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc96b906-bcb7-451e-bab7-8b4c00058ed9" containerName="mariadb-database-create" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.179647 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1" containerName="mariadb-account-create-update" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.180978 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.194512 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-2df2t"] Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.310576 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.310675 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.310735 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.310794 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-config\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.310826 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t242\" (UniqueName: \"kubernetes.io/projected/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-kube-api-access-9t242\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.414230 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-config\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.414354 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t242\" (UniqueName: \"kubernetes.io/projected/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-kube-api-access-9t242\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.414543 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.414592 4900 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.414652 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.415907 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.415910 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-config\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.416022 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.416706 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.446022 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t242\" (UniqueName: \"kubernetes.io/projected/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-kube-api-access-9t242\") pod \"dnsmasq-dns-5b946c75cc-2df2t\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") " pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:34 crc kubenswrapper[4900]: I0127 12:50:34.516354 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:35 crc kubenswrapper[4900]: I0127 12:50:35.277845 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-2df2t"] Jan 27 12:50:35 crc kubenswrapper[4900]: W0127 12:50:35.283863 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33d17d42_5a4f_4e80_a7ff_4a13a7042d3e.slice/crio-1f8051f0f51e3af7e1febd2495fddbfeefdc184285b614d1f40a888becca7071 WatchSource:0}: Error finding container 1f8051f0f51e3af7e1febd2495fddbfeefdc184285b614d1f40a888becca7071: Status 404 returned error can't find the container with id 1f8051f0f51e3af7e1febd2495fddbfeefdc184285b614d1f40a888becca7071 Jan 27 12:50:35 crc kubenswrapper[4900]: I0127 12:50:35.544465 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" event={"ID":"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e","Type":"ContainerStarted","Data":"1f8051f0f51e3af7e1febd2495fddbfeefdc184285b614d1f40a888becca7071"} Jan 27 12:50:36 crc kubenswrapper[4900]: I0127 12:50:36.571044 4900 generic.go:334] "Generic (PLEG): container finished" podID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerID="25d6548a13fad44ca49b158bfb4f1b00daab5fae3f5ef9e44946771d2e9e2a12" exitCode=0 Jan 27 12:50:36 crc kubenswrapper[4900]: I0127 12:50:36.571203 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" event={"ID":"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e","Type":"ContainerDied","Data":"25d6548a13fad44ca49b158bfb4f1b00daab5fae3f5ef9e44946771d2e9e2a12"} Jan 27 12:50:37 crc kubenswrapper[4900]: I0127 12:50:37.589308 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" event={"ID":"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e","Type":"ContainerStarted","Data":"78867ec7aaceb54b41123ba9850837539f721c27d9726e1494c0d8aaed1f0b41"} Jan 27 12:50:37 crc kubenswrapper[4900]: I0127 12:50:37.590972 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:37 crc kubenswrapper[4900]: I0127 12:50:37.614607 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" podStartSLOduration=3.614585491 podStartE2EDuration="3.614585491s" podCreationTimestamp="2026-01-27 12:50:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:37.61179439 +0000 UTC m=+1464.848822600" watchObservedRunningTime="2026-01-27 12:50:37.614585491 +0000 UTC m=+1464.851613701" Jan 27 12:50:38 crc kubenswrapper[4900]: I0127 12:50:38.388819 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 27 12:50:38 crc kubenswrapper[4900]: I0127 12:50:38.396585 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 27 12:50:38 crc kubenswrapper[4900]: I0127 12:50:38.609243 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 27 12:50:40 crc kubenswrapper[4900]: I0127 12:50:40.625929 4900 generic.go:334] "Generic (PLEG): container finished" podID="cf9e2ef8-0e37-438f-b2bd-c1c050c4064b" containerID="575c3ce2fc52aa5ad7d9c254701fe8fd0ecb1b1cffc53e229d74a5d284fcfcec" exitCode=0 Jan 27 12:50:40 crc 
kubenswrapper[4900]: I0127 12:50:40.626270 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cp5z4" event={"ID":"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b","Type":"ContainerDied","Data":"575c3ce2fc52aa5ad7d9c254701fe8fd0ecb1b1cffc53e229d74a5d284fcfcec"} Jan 27 12:50:41 crc kubenswrapper[4900]: I0127 12:50:41.219856 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:50:41 crc kubenswrapper[4900]: I0127 12:50:41.228883 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/0c2f90a4-baa0-4eeb-a797-3664c306818b-etc-swift\") pod \"swift-storage-0\" (UID: \"0c2f90a4-baa0-4eeb-a797-3664c306818b\") " pod="openstack/swift-storage-0" Jan 27 12:50:41 crc kubenswrapper[4900]: I0127 12:50:41.433208 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 27 12:50:42 crc kubenswrapper[4900]: W0127 12:50:42.066775 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c2f90a4_baa0_4eeb_a797_3664c306818b.slice/crio-2af3a68fe547b15e4b21fa30487205b992939bd61ac9dac4c87d5c89011422d2 WatchSource:0}: Error finding container 2af3a68fe547b15e4b21fa30487205b992939bd61ac9dac4c87d5c89011422d2: Status 404 returned error can't find the container with id 2af3a68fe547b15e4b21fa30487205b992939bd61ac9dac4c87d5c89011422d2 Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.080210 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.118914 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.242334 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7q9kq\" (UniqueName: \"kubernetes.io/projected/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-kube-api-access-7q9kq\") pod \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.242454 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-combined-ca-bundle\") pod \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.242556 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-config-data\") pod \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\" (UID: \"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b\") " Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.249516 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-kube-api-access-7q9kq" (OuterVolumeSpecName: "kube-api-access-7q9kq") pod "cf9e2ef8-0e37-438f-b2bd-c1c050c4064b" (UID: "cf9e2ef8-0e37-438f-b2bd-c1c050c4064b"). InnerVolumeSpecName "kube-api-access-7q9kq". 
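
The cAdvisor warning above ("Failed to process watch event ... Status 404") is a benign race: the cgroup inotify event for the new crio- scope arrives before CRI-O has registered the container, and the same 64-hex ID shows up in a ContainerStarted event for swift-storage-0 moments later. The ID is embedded twice in the warning, once in the cgroup path and once in the error, which a two-capture regexp makes easy to confirm; a small Go sketch over the verbatim warning text:

package main

import (
	"fmt"
	"regexp"
)

// The cgroup path in the warning embeds the same container ID that cAdvisor
// then fails to look up while CRI-O is still creating the container.
var watchRe = regexp.MustCompile(`crio-([0-9a-f]{64})\b.*can't find the container with id ([0-9a-f]{64})`)

func main() {
	w := `Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c2f90a4_baa0_4eeb_a797_3664c306818b.slice/crio-2af3a68fe547b15e4b21fa30487205b992939bd61ac9dac4c87d5c89011422d2 WatchSource:0}: Error finding container 2af3a68fe547b15e4b21fa30487205b992939bd61ac9dac4c87d5c89011422d2: Status 404 returned error can't find the container with id 2af3a68fe547b15e4b21fa30487205b992939bd61ac9dac4c87d5c89011422d2`
	if m := watchRe.FindStringSubmatch(w); m != nil {
		fmt.Println("cgroup id == missing id:", m[1] == m[2]) // true
	}
}
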
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.273227 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf9e2ef8-0e37-438f-b2bd-c1c050c4064b" (UID: "cf9e2ef8-0e37-438f-b2bd-c1c050c4064b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.297137 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-config-data" (OuterVolumeSpecName: "config-data") pod "cf9e2ef8-0e37-438f-b2bd-c1c050c4064b" (UID: "cf9e2ef8-0e37-438f-b2bd-c1c050c4064b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.346315 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.346358 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.346379 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7q9kq\" (UniqueName: \"kubernetes.io/projected/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b-kube-api-access-7q9kq\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.650166 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cp5z4" event={"ID":"cf9e2ef8-0e37-438f-b2bd-c1c050c4064b","Type":"ContainerDied","Data":"25303ee8a941aa36e1f2f7725f74e0a0501693758ef612ae7342240ba7c5fa36"} Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.650577 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25303ee8a941aa36e1f2f7725f74e0a0501693758ef612ae7342240ba7c5fa36" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.650446 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-cp5z4" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.651721 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"2af3a68fe547b15e4b21fa30487205b992939bd61ac9dac4c87d5c89011422d2"} Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.953381 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-2df2t"] Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.953674 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" podUID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerName="dnsmasq-dns" containerID="cri-o://78867ec7aaceb54b41123ba9850837539f721c27d9726e1494c0d8aaed1f0b41" gracePeriod=10 Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.955383 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.980136 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8f2g8"] Jan 27 12:50:42 crc kubenswrapper[4900]: E0127 12:50:42.980736 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf9e2ef8-0e37-438f-b2bd-c1c050c4064b" containerName="keystone-db-sync" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.980759 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf9e2ef8-0e37-438f-b2bd-c1c050c4064b" containerName="keystone-db-sync" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.980999 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf9e2ef8-0e37-438f-b2bd-c1c050c4064b" containerName="keystone-db-sync" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.981781 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.986893 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.987169 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.987349 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.987462 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 27 12:50:42 crc kubenswrapper[4900]: I0127 12:50:42.987627 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k2fbf" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.012178 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8f2g8"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.051307 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-784f69c749-rffb8"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.064605 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078522 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-combined-ca-bundle\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078600 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-sb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078627 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jmtb\" (UniqueName: \"kubernetes.io/projected/aa7737bb-7773-450c-9fe8-f5e27d501de8-kube-api-access-7jmtb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078650 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-config-data\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078671 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-nb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078700 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-fernet-keys\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078737 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-credential-keys\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078780 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh9t5\" (UniqueName: \"kubernetes.io/projected/819e890f-4f20-4b02-b41d-88cc6da2dee3-kube-api-access-fh9t5\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078865 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-scripts\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078904 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-dns-svc\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.078991 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-config\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.096134 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-784f69c749-rffb8"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.178912 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-nwqdh"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181504 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-scripts\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181593 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-dns-svc\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181701 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-config\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181768 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-combined-ca-bundle\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181826 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-sb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181861 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jmtb\" (UniqueName: \"kubernetes.io/projected/aa7737bb-7773-450c-9fe8-f5e27d501de8-kube-api-access-7jmtb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " 
pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181889 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-config-data\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181917 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-nb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181958 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-fernet-keys\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.181993 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-credential-keys\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.182032 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh9t5\" (UniqueName: \"kubernetes.io/projected/819e890f-4f20-4b02-b41d-88cc6da2dee3-kube-api-access-fh9t5\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.186988 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.187215 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-sb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.189670 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-config\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.193671 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-nb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.198324 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-config-data\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.198533 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-credential-keys\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.204701 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-dns-svc\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.210684 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-gwwtc" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.210985 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.222650 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-fernet-keys\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.226841 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh9t5\" (UniqueName: \"kubernetes.io/projected/819e890f-4f20-4b02-b41d-88cc6da2dee3-kube-api-access-fh9t5\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.227431 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-scripts\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.228545 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-combined-ca-bundle\") pod \"keystone-bootstrap-8f2g8\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") " pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.238993 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jmtb\" (UniqueName: \"kubernetes.io/projected/aa7737bb-7773-450c-9fe8-f5e27d501de8-kube-api-access-7jmtb\") pod \"dnsmasq-dns-784f69c749-rffb8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.291223 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-nwqdh"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.301736 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-combined-ca-bundle\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.301782 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnfxx\" (UniqueName: \"kubernetes.io/projected/b5375696-4614-47d4-a8aa-2a98bdd0bd17-kube-api-access-fnfxx\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.301851 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-config-data\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.316346 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8f2g8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.371846 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.403396 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnfxx\" (UniqueName: \"kubernetes.io/projected/b5375696-4614-47d4-a8aa-2a98bdd0bd17-kube-api-access-fnfxx\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.409730 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-config-data\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.410156 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-combined-ca-bundle\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.436623 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-combined-ca-bundle\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.436955 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-config-data\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.495938 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnfxx\" (UniqueName: \"kubernetes.io/projected/b5375696-4614-47d4-a8aa-2a98bdd0bd17-kube-api-access-fnfxx\") pod \"heat-db-sync-nwqdh\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.698004 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-nwqdh" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.797754 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-2rf6d"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.799845 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.820032 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.820311 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.820852 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-ls755" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.829783 4900 generic.go:334] "Generic (PLEG): container finished" podID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerID="78867ec7aaceb54b41123ba9850837539f721c27d9726e1494c0d8aaed1f0b41" exitCode=0 Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.829865 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" event={"ID":"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e","Type":"ContainerDied","Data":"78867ec7aaceb54b41123ba9850837539f721c27d9726e1494c0d8aaed1f0b41"} Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.838186 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-2rf6d"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.888239 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-dr6wx"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.889678 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.905005 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-pqf6l" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.905544 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.929132 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-dr6wx"] Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934021 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4hps\" (UniqueName: \"kubernetes.io/projected/f3331ea7-d796-459a-9e9d-6f744ba8822b-kube-api-access-j4hps\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934118 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3331ea7-d796-459a-9e9d-6f744ba8822b-etc-machine-id\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934242 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c4pj\" (UniqueName: \"kubernetes.io/projected/ed853af8-ff05-4908-a82f-deceefe54dad-kube-api-access-8c4pj\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934276 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-scripts\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934326 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-combined-ca-bundle\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934368 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-db-sync-config-data\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934567 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-combined-ca-bundle\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934593 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-config-data\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.934642 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-db-sync-config-data\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:43 crc kubenswrapper[4900]: I0127 12:50:43.962942 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-784f69c749-rffb8"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038248 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-combined-ca-bundle\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038323 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-config-data\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038378 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-db-sync-config-data\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038404 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-j4hps\" (UniqueName: \"kubernetes.io/projected/f3331ea7-d796-459a-9e9d-6f744ba8822b-kube-api-access-j4hps\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038442 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3331ea7-d796-459a-9e9d-6f744ba8822b-etc-machine-id\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038539 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c4pj\" (UniqueName: \"kubernetes.io/projected/ed853af8-ff05-4908-a82f-deceefe54dad-kube-api-access-8c4pj\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038574 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-scripts\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038610 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-combined-ca-bundle\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.038658 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-db-sync-config-data\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.041090 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3331ea7-d796-459a-9e9d-6f744ba8822b-etc-machine-id\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.058547 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-combined-ca-bundle\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.064763 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-db-sync-config-data\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.064938 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-scripts\") pod \"cinder-db-sync-2rf6d\" 
(UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.066384 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-db-sync-config-data\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.066394 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-combined-ca-bundle\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.069434 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-config-data\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.080688 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-d4wfs"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.090525 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.100970 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4hps\" (UniqueName: \"kubernetes.io/projected/f3331ea7-d796-459a-9e9d-6f744ba8822b-kube-api-access-j4hps\") pod \"cinder-db-sync-2rf6d\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.109279 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.109557 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.109737 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-55hcd" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.116640 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c4pj\" (UniqueName: \"kubernetes.io/projected/ed853af8-ff05-4908-a82f-deceefe54dad-kube-api-access-8c4pj\") pod \"barbican-db-sync-dr6wx\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.141653 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92j94\" (UniqueName: \"kubernetes.io/projected/f3932d13-5d69-455f-88db-3978da7a8c00-kube-api-access-92j94\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.149387 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-cgxdj"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.158124 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-combined-ca-bundle\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.159531 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-config\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.162873 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.249090 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-4kmq8"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.262636 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.269315 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrlzt\" (UniqueName: \"kubernetes.io/projected/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-kube-api-access-lrlzt\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.269383 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-config\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.269426 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92j94\" (UniqueName: \"kubernetes.io/projected/f3932d13-5d69-455f-88db-3978da7a8c00-kube-api-access-92j94\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.269470 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-combined-ca-bundle\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.269523 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-nb\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.269569 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-config\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.274491 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-sb\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.274877 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-dns-svc\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.290283 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.290959 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-b6zdc" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.291180 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.317637 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-config\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.318335 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-combined-ca-bundle\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.325141 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.340140 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.360372 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-d4wfs"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.368199 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92j94\" (UniqueName: \"kubernetes.io/projected/f3932d13-5d69-455f-88db-3978da7a8c00-kube-api-access-92j94\") pod \"neutron-db-sync-d4wfs\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.385745 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5c58344-109f-4fd1-948c-39ef56d4b0eb-logs\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.385827 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-nb\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.385937 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfxdf\" (UniqueName: \"kubernetes.io/projected/a5c58344-109f-4fd1-948c-39ef56d4b0eb-kube-api-access-rfxdf\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.385978 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-scripts\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.386044 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-sb\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.386274 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-dns-svc\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.386305 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-config-data\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.386472 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrlzt\" (UniqueName: 
\"kubernetes.io/projected/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-kube-api-access-lrlzt\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.386522 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-config\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.386648 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-combined-ca-bundle\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.387861 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-nb\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.398400 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-sb\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.398894 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-dns-svc\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.399644 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-config\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.427793 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4kmq8"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.446527 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-cgxdj"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.456099 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrlzt\" (UniqueName: \"kubernetes.io/projected/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-kube-api-access-lrlzt\") pod \"dnsmasq-dns-f84976bdf-cgxdj\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.472819 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.486738 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.490284 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-combined-ca-bundle\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.490419 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5c58344-109f-4fd1-948c-39ef56d4b0eb-logs\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.490487 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfxdf\" (UniqueName: \"kubernetes.io/projected/a5c58344-109f-4fd1-948c-39ef56d4b0eb-kube-api-access-rfxdf\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.490524 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-scripts\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.490702 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-config-data\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.491827 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5c58344-109f-4fd1-948c-39ef56d4b0eb-logs\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.492093 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.497321 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.497714 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.499328 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.499645 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6vqzq" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.516861 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-scripts\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.577293 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-config-data\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.587412 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfxdf\" (UniqueName: \"kubernetes.io/projected/a5c58344-109f-4fd1-948c-39ef56d4b0eb-kube-api-access-rfxdf\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.595878 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.605646 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-combined-ca-bundle\") pod \"placement-db-sync-4kmq8\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.620495 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.620984 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.621087 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-logs\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.621259 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.621658 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.621735 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-config-data\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.621784 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnt9z\" (UniqueName: \"kubernetes.io/projected/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-kube-api-access-pnt9z\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.622013 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-scripts\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.666544 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.667164 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.669749 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.669888 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.674912 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.675362 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.728976 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729158 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729187 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729270 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtpwz\" (UniqueName: \"kubernetes.io/projected/f23369b5-8053-430d-a8cb-af18fa1b976d-kube-api-access-qtpwz\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729334 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729383 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-config-data\") pod \"glance-default-external-api-0\" (UID: 
\"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729400 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-logs\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729427 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnt9z\" (UniqueName: \"kubernetes.io/projected/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-kube-api-access-pnt9z\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729583 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-scripts\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729623 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729657 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729817 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729915 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729958 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.729990 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-logs\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.730019 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.731565 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.739768 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.740434 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-logs\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.742798 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4kmq8" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.745750 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.745798 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.745894 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/95185e520e2875ca4693f40e4e3857c43a9e41c071319588428e729bb9badc1a/globalmount\"" pod="openstack/glance-default-external-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.747351 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-scripts\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.748923 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-config-data\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.772653 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.773805 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnt9z\" (UniqueName: \"kubernetes.io/projected/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-kube-api-access-pnt9z\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.833128 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.833184 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.833278 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.833319 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.833358 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtpwz\" (UniqueName: \"kubernetes.io/projected/f23369b5-8053-430d-a8cb-af18fa1b976d-kube-api-access-qtpwz\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.833401 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-logs\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.833474 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.833502 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.834040 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.834531 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-logs\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.838677 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.843725 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.862239 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.867082 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.867431 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/4b03b1e0d87c93578458a398975bf619ff063e17c81725aa528a6bf239df8b6c/globalmount\"" pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.899842 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtpwz\" (UniqueName: \"kubernetes.io/projected/f23369b5-8053-430d-a8cb-af18fa1b976d-kube-api-access-qtpwz\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.911561 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.915770 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.916935 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" event={"ID":"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e","Type":"ContainerDied","Data":"1f8051f0f51e3af7e1febd2495fddbfeefdc184285b614d1f40a888becca7071"}
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.917319 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.933571 4900 scope.go:117] "RemoveContainer" containerID="78867ec7aaceb54b41123ba9850837539f721c27d9726e1494c0d8aaed1f0b41"
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.935989 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-nb\") pod \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") "
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.956187 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9t242\" (UniqueName: \"kubernetes.io/projected/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-kube-api-access-9t242\") pod \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") "
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.956294 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-sb\") pod \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") "
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.956318 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-config\") pod \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") "
Jan 27 12:50:44 crc kubenswrapper[4900]: I0127 12:50:44.956377 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-dns-svc\") pod \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\" (UID: \"33d17d42-5a4f-4e80-a7ff-4a13a7042d3e\") "
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.020843 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-kube-api-access-9t242" (OuterVolumeSpecName: "kube-api-access-9t242") pod "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" (UID: "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e"). InnerVolumeSpecName "kube-api-access-9t242". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.035600 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" (UID: "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.053834 4900 scope.go:117] "RemoveContainer" containerID="25d6548a13fad44ca49b158bfb4f1b00daab5fae3f5ef9e44946771d2e9e2a12"
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.069179 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.080001 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.080034 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9t242\" (UniqueName: \"kubernetes.io/projected/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-kube-api-access-9t242\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.117773 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" (UID: "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.159747 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.183072 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.183327 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8f2g8"]
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.192832 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-config" (OuterVolumeSpecName: "config") pod "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" (UID: "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.204178 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-784f69c749-rffb8"]
Jan 27 12:50:45 crc kubenswrapper[4900]: W0127 12:50:45.242418 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod819e890f_4f20_4b02_b41d_88cc6da2dee3.slice/crio-e265ee93f2cc27c6d80ec0a7bed100aae04558d597b6533c72b328af5de46406 WatchSource:0}: Error finding container e265ee93f2cc27c6d80ec0a7bed100aae04558d597b6533c72b328af5de46406: Status 404 returned error can't find the container with id e265ee93f2cc27c6d80ec0a7bed100aae04558d597b6533c72b328af5de46406
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.290073 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.299213 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" (UID: "33d17d42-5a4f-4e80-a7ff-4a13a7042d3e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.325038 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-nwqdh"]
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.394929 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.420883 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.448711 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:50:45 crc kubenswrapper[4900]: E0127 12:50:45.449369 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerName="dnsmasq-dns"
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.449391 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerName="dnsmasq-dns"
Jan 27 12:50:45 crc kubenswrapper[4900]: E0127 12:50:45.449426 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerName="init"
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.449435 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerName="init"
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.449729 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerName="dnsmasq-dns"
Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.452292 4900 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.460497 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.470763 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.476481 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.600185 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-scripts\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.600264 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-run-httpd\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.600304 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nczbn\" (UniqueName: \"kubernetes.io/projected/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-kube-api-access-nczbn\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.600422 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.600531 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-config-data\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.600654 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.600739 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-log-httpd\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.676114 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-2df2t"] Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.703069 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-scripts\") pod 
\"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.703134 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-run-httpd\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.703169 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nczbn\" (UniqueName: \"kubernetes.io/projected/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-kube-api-access-nczbn\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.707038 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.707213 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-config-data\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.707440 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.707564 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-log-httpd\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.708134 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-log-httpd\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.708363 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-run-httpd\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.713068 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-scripts\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.713947 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-2df2t"] Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.768918 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.769286 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.769730 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-config-data\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.785414 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nczbn\" (UniqueName: \"kubernetes.io/projected/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-kube-api-access-nczbn\") pod \"ceilometer-0\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.788242 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-2rf6d"] Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.807315 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-dr6wx"] Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.949344 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-784f69c749-rffb8" event={"ID":"aa7737bb-7773-450c-9fe8-f5e27d501de8","Type":"ContainerStarted","Data":"f7f5b44b6ad2432403c82c3fff49a2752b4bdd5936d245bb40ad24ba7dc8dd54"} Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.964348 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.973696 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8f2g8" event={"ID":"819e890f-4f20-4b02-b41d-88cc6da2dee3","Type":"ContainerStarted","Data":"e265ee93f2cc27c6d80ec0a7bed100aae04558d597b6533c72b328af5de46406"} Jan 27 12:50:45 crc kubenswrapper[4900]: I0127 12:50:45.984772 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-nwqdh" event={"ID":"b5375696-4614-47d4-a8aa-2a98bdd0bd17","Type":"ContainerStarted","Data":"6af01d0ff9437057bc43a241cc0816f10eb5343b21dbf3c72793b69ccb703aa6"} Jan 27 12:50:46 crc kubenswrapper[4900]: I0127 12:50:46.233643 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-d4wfs"] Jan 27 12:50:46 crc kubenswrapper[4900]: I0127 12:50:46.264142 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-cgxdj"] Jan 27 12:50:46 crc kubenswrapper[4900]: I0127 12:50:46.299510 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4kmq8"] Jan 27 12:50:46 crc kubenswrapper[4900]: I0127 12:50:46.653555 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" path="/var/lib/kubelet/pods/33d17d42-5a4f-4e80-a7ff-4a13a7042d3e/volumes" Jan 27 12:50:46 crc kubenswrapper[4900]: I0127 12:50:46.654823 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:50:46 crc kubenswrapper[4900]: I0127 12:50:46.654874 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:50:46 crc kubenswrapper[4900]: I0127 12:50:46.654894 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:50:46 crc kubenswrapper[4900]: I0127 12:50:46.858235 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:50:47 crc kubenswrapper[4900]: I0127 12:50:47.037359 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8f2g8" event={"ID":"819e890f-4f20-4b02-b41d-88cc6da2dee3","Type":"ContainerStarted","Data":"9de16403e647a789a56826b863a8d06a397726a07eaee105988d975a31657665"} Jan 27 12:50:47 crc kubenswrapper[4900]: I0127 12:50:47.286935 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:50:47 crc kubenswrapper[4900]: I0127 12:50:47.499428 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8f2g8" podStartSLOduration=5.499400237 podStartE2EDuration="5.499400237s" podCreationTimestamp="2026-01-27 12:50:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:47.477457127 +0000 UTC m=+1474.714485337" watchObservedRunningTime="2026-01-27 12:50:47.499400237 +0000 UTC m=+1474.736428457" Jan 27 12:50:47 crc kubenswrapper[4900]: W0127 12:50:47.975165 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5be1ee3b_28f0_4dd0_83cb_c0f7ce2093a7.slice/crio-483bca978bf3feb121ae1c234de9916bcfde0d3cbf4324fd748626f5c775885f WatchSource:0}: Error finding container 483bca978bf3feb121ae1c234de9916bcfde0d3cbf4324fd748626f5c775885f: Status 404 returned error can't find the container 
with id 483bca978bf3feb121ae1c234de9916bcfde0d3cbf4324fd748626f5c775885f Jan 27 12:50:47 crc kubenswrapper[4900]: W0127 12:50:47.985473 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3331ea7_d796_459a_9e9d_6f744ba8822b.slice/crio-d0f00e23799dcec62273e3e0f763d5497430c94f1c62c277fe27ef1314a0e2d2 WatchSource:0}: Error finding container d0f00e23799dcec62273e3e0f763d5497430c94f1c62c277fe27ef1314a0e2d2: Status 404 returned error can't find the container with id d0f00e23799dcec62273e3e0f763d5497430c94f1c62c277fe27ef1314a0e2d2 Jan 27 12:50:47 crc kubenswrapper[4900]: W0127 12:50:47.996043 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9988606_d1b6_4ef7_acd1_a73f41f1dc92.slice/crio-20e73e44c1f24e748abed0883fa6080671dc572403d65928b6eaa2bcdac499ad WatchSource:0}: Error finding container 20e73e44c1f24e748abed0883fa6080671dc572403d65928b6eaa2bcdac499ad: Status 404 returned error can't find the container with id 20e73e44c1f24e748abed0883fa6080671dc572403d65928b6eaa2bcdac499ad Jan 27 12:50:48 crc kubenswrapper[4900]: W0127 12:50:48.018388 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf23369b5_8053_430d_a8cb_af18fa1b976d.slice/crio-fb0d531c6d33a82f23ebb11a2c2379aac208f1ccc1cebb5292c3ba8b41262d89 WatchSource:0}: Error finding container fb0d531c6d33a82f23ebb11a2c2379aac208f1ccc1cebb5292c3ba8b41262d89: Status 404 returned error can't find the container with id fb0d531c6d33a82f23ebb11a2c2379aac208f1ccc1cebb5292c3ba8b41262d89 Jan 27 12:50:48 crc kubenswrapper[4900]: I0127 12:50:48.075273 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f23369b5-8053-430d-a8cb-af18fa1b976d","Type":"ContainerStarted","Data":"fb0d531c6d33a82f23ebb11a2c2379aac208f1ccc1cebb5292c3ba8b41262d89"} Jan 27 12:50:48 crc kubenswrapper[4900]: I0127 12:50:48.076963 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d4wfs" event={"ID":"f3932d13-5d69-455f-88db-3978da7a8c00","Type":"ContainerStarted","Data":"24fe9718e9288540c5fb3dd61e4f915bc8116944fc67d34a84d0242c89169c8e"} Jan 27 12:50:48 crc kubenswrapper[4900]: I0127 12:50:48.096565 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dr6wx" event={"ID":"ed853af8-ff05-4908-a82f-deceefe54dad","Type":"ContainerStarted","Data":"23fc0e28876ac33d0f10f484db5639018b61c9f0d3b12d67bf67c5c2d47b59a6"} Jan 27 12:50:48 crc kubenswrapper[4900]: I0127 12:50:48.098624 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4kmq8" event={"ID":"a5c58344-109f-4fd1-948c-39ef56d4b0eb","Type":"ContainerStarted","Data":"08eeeb7298ae337718e96c66b933c1383c59fc4430d48e34fa1d24d44a5b4cd6"} Jan 27 12:50:48 crc kubenswrapper[4900]: I0127 12:50:48.099741 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2rf6d" event={"ID":"f3331ea7-d796-459a-9e9d-6f744ba8822b","Type":"ContainerStarted","Data":"d0f00e23799dcec62273e3e0f763d5497430c94f1c62c277fe27ef1314a0e2d2"} Jan 27 12:50:48 crc kubenswrapper[4900]: I0127 12:50:48.113754 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"b9988606-d1b6-4ef7-acd1-a73f41f1dc92","Type":"ContainerStarted","Data":"20e73e44c1f24e748abed0883fa6080671dc572403d65928b6eaa2bcdac499ad"} Jan 27 12:50:48 crc kubenswrapper[4900]: I0127 12:50:48.133289 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" event={"ID":"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7","Type":"ContainerStarted","Data":"483bca978bf3feb121ae1c234de9916bcfde0d3cbf4324fd748626f5c775885f"} Jan 27 12:50:48 crc kubenswrapper[4900]: I0127 12:50:48.656527 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.179359 4900 generic.go:334] "Generic (PLEG): container finished" podID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerID="560ed17ecfcf39ac8497c44933ccd5aecfba05ffea2010b93bf724e1200e1451" exitCode=0 Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.179723 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" event={"ID":"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7","Type":"ContainerDied","Data":"560ed17ecfcf39ac8497c44933ccd5aecfba05ffea2010b93bf724e1200e1451"} Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.186641 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"1d6ac589b1046b88b79ac70b59f76d0b20309d5fc47ba197e03d42c318c0c75a"} Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.191439 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed","Type":"ContainerStarted","Data":"ebab9cbb7c23697eda46b670ed9e929b66b719ceb4ed7fcfcfe16397ade522f3"} Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.210938 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d4wfs" event={"ID":"f3932d13-5d69-455f-88db-3978da7a8c00","Type":"ContainerStarted","Data":"2cdcc5f6b97f6d2c57e295057c54e9dbf3f2c114cb50cf9a4ad28c5343df0fbd"} Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.224208 4900 generic.go:334] "Generic (PLEG): container finished" podID="aa7737bb-7773-450c-9fe8-f5e27d501de8" containerID="d6ef9ce26f937bf1d45efcec5644f6dc4f6df7f606bc6380b240a845182e9286" exitCode=0 Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.224270 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-784f69c749-rffb8" event={"ID":"aa7737bb-7773-450c-9fe8-f5e27d501de8","Type":"ContainerDied","Data":"d6ef9ce26f937bf1d45efcec5644f6dc4f6df7f606bc6380b240a845182e9286"} Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.249861 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-d4wfs" podStartSLOduration=6.249834404 podStartE2EDuration="6.249834404s" podCreationTimestamp="2026-01-27 12:50:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:49.239607806 +0000 UTC m=+1476.476636016" watchObservedRunningTime="2026-01-27 12:50:49.249834404 +0000 UTC m=+1476.486862604" Jan 27 12:50:49 crc kubenswrapper[4900]: I0127 12:50:49.518837 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5b946c75cc-2df2t" podUID="33d17d42-5a4f-4e80-a7ff-4a13a7042d3e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.176:5353: i/o timeout" Jan 27 12:50:50 crc 
kubenswrapper[4900]: I0127 12:50:50.326231 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.332035 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-784f69c749-rffb8" event={"ID":"aa7737bb-7773-450c-9fe8-f5e27d501de8","Type":"ContainerDied","Data":"f7f5b44b6ad2432403c82c3fff49a2752b4bdd5936d245bb40ad24ba7dc8dd54"} Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.332112 4900 scope.go:117] "RemoveContainer" containerID="d6ef9ce26f937bf1d45efcec5644f6dc4f6df7f606bc6380b240a845182e9286" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.334664 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b9988606-d1b6-4ef7-acd1-a73f41f1dc92","Type":"ContainerStarted","Data":"f5dcf7b70b726d88aa89919739136875cc1f1c9073d4a7f1469b0e8c522ba804"} Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.337998 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" event={"ID":"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7","Type":"ContainerStarted","Data":"3e2b8649ad7da075231edd580a45c063bd7b1e428382c6f4505ce930a213049b"} Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.339442 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.365374 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"ce643d9c73cf46a146095eec6de35d4e5d36225aad017188dfce76b2a31a9779"} Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.365456 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"7ecdad877107140b402df84641f5acc26cbd3e95988f92a365be7fce482ee0e0"} Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.373954 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f23369b5-8053-430d-a8cb-af18fa1b976d","Type":"ContainerStarted","Data":"be405de2e5815a7f4fadc67d0227ef27e616baa4ba588a0f8533a32667fc5fa9"} Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.390043 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" podStartSLOduration=7.390026282 podStartE2EDuration="7.390026282s" podCreationTimestamp="2026-01-27 12:50:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:50.385456319 +0000 UTC m=+1477.622484529" watchObservedRunningTime="2026-01-27 12:50:50.390026282 +0000 UTC m=+1477.627054492" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.409478 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-dns-svc\") pod \"aa7737bb-7773-450c-9fe8-f5e27d501de8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.409604 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-sb\") 
pod \"aa7737bb-7773-450c-9fe8-f5e27d501de8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.409671 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-nb\") pod \"aa7737bb-7773-450c-9fe8-f5e27d501de8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.409811 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-config\") pod \"aa7737bb-7773-450c-9fe8-f5e27d501de8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.409849 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jmtb\" (UniqueName: \"kubernetes.io/projected/aa7737bb-7773-450c-9fe8-f5e27d501de8-kube-api-access-7jmtb\") pod \"aa7737bb-7773-450c-9fe8-f5e27d501de8\" (UID: \"aa7737bb-7773-450c-9fe8-f5e27d501de8\") " Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.422534 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa7737bb-7773-450c-9fe8-f5e27d501de8-kube-api-access-7jmtb" (OuterVolumeSpecName: "kube-api-access-7jmtb") pod "aa7737bb-7773-450c-9fe8-f5e27d501de8" (UID: "aa7737bb-7773-450c-9fe8-f5e27d501de8"). InnerVolumeSpecName "kube-api-access-7jmtb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.455045 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aa7737bb-7773-450c-9fe8-f5e27d501de8" (UID: "aa7737bb-7773-450c-9fe8-f5e27d501de8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.466644 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aa7737bb-7773-450c-9fe8-f5e27d501de8" (UID: "aa7737bb-7773-450c-9fe8-f5e27d501de8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.475265 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-config" (OuterVolumeSpecName: "config") pod "aa7737bb-7773-450c-9fe8-f5e27d501de8" (UID: "aa7737bb-7773-450c-9fe8-f5e27d501de8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.501909 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aa7737bb-7773-450c-9fe8-f5e27d501de8" (UID: "aa7737bb-7773-450c-9fe8-f5e27d501de8"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.512345 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.512387 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.512403 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.512415 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa7737bb-7773-450c-9fe8-f5e27d501de8-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:50 crc kubenswrapper[4900]: I0127 12:50:50.512428 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jmtb\" (UniqueName: \"kubernetes.io/projected/aa7737bb-7773-450c-9fe8-f5e27d501de8-kube-api-access-7jmtb\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:51 crc kubenswrapper[4900]: I0127 12:50:51.407296 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-784f69c749-rffb8" Jan 27 12:50:51 crc kubenswrapper[4900]: I0127 12:50:51.423083 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"4436bb8f7b29ce1d185d5322c3ec688432fce6aef609c291403a933a9ee36170"} Jan 27 12:50:51 crc kubenswrapper[4900]: I0127 12:50:51.543806 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-784f69c749-rffb8"] Jan 27 12:50:51 crc kubenswrapper[4900]: I0127 12:50:51.591862 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-784f69c749-rffb8"] Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.459295 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f23369b5-8053-430d-a8cb-af18fa1b976d","Type":"ContainerStarted","Data":"acf5ac4cb56c0da001f79c933061459c23ecd037059ff68499ea6413d47fc9ea"} Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.459386 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerName="glance-log" containerID="cri-o://be405de2e5815a7f4fadc67d0227ef27e616baa4ba588a0f8533a32667fc5fa9" gracePeriod=30 Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.459822 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerName="glance-httpd" containerID="cri-o://acf5ac4cb56c0da001f79c933061459c23ecd037059ff68499ea6413d47fc9ea" gracePeriod=30 Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.469718 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerName="glance-log" 
containerID="cri-o://f5dcf7b70b726d88aa89919739136875cc1f1c9073d4a7f1469b0e8c522ba804" gracePeriod=30 Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.470045 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b9988606-d1b6-4ef7-acd1-a73f41f1dc92","Type":"ContainerStarted","Data":"7fad735114113704d286c75bebec0ec12fd47d79383ab86e1e6481a77b1abca8"} Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.470149 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerName="glance-httpd" containerID="cri-o://7fad735114113704d286c75bebec0ec12fd47d79383ab86e1e6481a77b1abca8" gracePeriod=30 Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.491582 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.491560855 podStartE2EDuration="9.491560855s" podCreationTimestamp="2026-01-27 12:50:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:52.486350073 +0000 UTC m=+1479.723378283" watchObservedRunningTime="2026-01-27 12:50:52.491560855 +0000 UTC m=+1479.728589055" Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.524482 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa7737bb-7773-450c-9fe8-f5e27d501de8" path="/var/lib/kubelet/pods/aa7737bb-7773-450c-9fe8-f5e27d501de8/volumes" Jan 27 12:50:52 crc kubenswrapper[4900]: I0127 12:50:52.525051 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.525033051 podStartE2EDuration="9.525033051s" podCreationTimestamp="2026-01-27 12:50:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:50:52.507073618 +0000 UTC m=+1479.744101838" watchObservedRunningTime="2026-01-27 12:50:52.525033051 +0000 UTC m=+1479.762061261" Jan 27 12:50:53 crc kubenswrapper[4900]: I0127 12:50:53.488437 4900 generic.go:334] "Generic (PLEG): container finished" podID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerID="acf5ac4cb56c0da001f79c933061459c23ecd037059ff68499ea6413d47fc9ea" exitCode=0 Jan 27 12:50:53 crc kubenswrapper[4900]: I0127 12:50:53.488824 4900 generic.go:334] "Generic (PLEG): container finished" podID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerID="be405de2e5815a7f4fadc67d0227ef27e616baa4ba588a0f8533a32667fc5fa9" exitCode=143 Jan 27 12:50:53 crc kubenswrapper[4900]: I0127 12:50:53.488602 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f23369b5-8053-430d-a8cb-af18fa1b976d","Type":"ContainerDied","Data":"acf5ac4cb56c0da001f79c933061459c23ecd037059ff68499ea6413d47fc9ea"} Jan 27 12:50:53 crc kubenswrapper[4900]: I0127 12:50:53.488969 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f23369b5-8053-430d-a8cb-af18fa1b976d","Type":"ContainerDied","Data":"be405de2e5815a7f4fadc67d0227ef27e616baa4ba588a0f8533a32667fc5fa9"} Jan 27 12:50:53 crc kubenswrapper[4900]: I0127 12:50:53.495073 4900 generic.go:334] "Generic (PLEG): container finished" podID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" 
containerID="7fad735114113704d286c75bebec0ec12fd47d79383ab86e1e6481a77b1abca8" exitCode=0 Jan 27 12:50:53 crc kubenswrapper[4900]: I0127 12:50:53.495109 4900 generic.go:334] "Generic (PLEG): container finished" podID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerID="f5dcf7b70b726d88aa89919739136875cc1f1c9073d4a7f1469b0e8c522ba804" exitCode=143 Jan 27 12:50:53 crc kubenswrapper[4900]: I0127 12:50:53.495139 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b9988606-d1b6-4ef7-acd1-a73f41f1dc92","Type":"ContainerDied","Data":"7fad735114113704d286c75bebec0ec12fd47d79383ab86e1e6481a77b1abca8"} Jan 27 12:50:53 crc kubenswrapper[4900]: I0127 12:50:53.495173 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b9988606-d1b6-4ef7-acd1-a73f41f1dc92","Type":"ContainerDied","Data":"f5dcf7b70b726d88aa89919739136875cc1f1c9073d4a7f1469b0e8c522ba804"} Jan 27 12:50:54 crc kubenswrapper[4900]: I0127 12:50:54.598315 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:50:54 crc kubenswrapper[4900]: I0127 12:50:54.707116 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-z8pb8"] Jan 27 12:50:54 crc kubenswrapper[4900]: I0127 12:50:54.707445 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-z8pb8" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="dnsmasq-dns" containerID="cri-o://bd25891da5f2894b1dc28a7be89eaefdea2527622d3e2a18e60d2e172cbac996" gracePeriod=10 Jan 27 12:50:55 crc kubenswrapper[4900]: I0127 12:50:55.546793 4900 generic.go:334] "Generic (PLEG): container finished" podID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerID="bd25891da5f2894b1dc28a7be89eaefdea2527622d3e2a18e60d2e172cbac996" exitCode=0 Jan 27 12:50:55 crc kubenswrapper[4900]: I0127 12:50:55.547088 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-z8pb8" event={"ID":"67c4919e-b3dc-47ea-8728-03d0aaf07c18","Type":"ContainerDied","Data":"bd25891da5f2894b1dc28a7be89eaefdea2527622d3e2a18e60d2e172cbac996"} Jan 27 12:50:56 crc kubenswrapper[4900]: I0127 12:50:56.079020 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-z8pb8" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.149:5353: connect: connection refused" Jan 27 12:50:56 crc kubenswrapper[4900]: I0127 12:50:56.561914 4900 generic.go:334] "Generic (PLEG): container finished" podID="819e890f-4f20-4b02-b41d-88cc6da2dee3" containerID="9de16403e647a789a56826b863a8d06a397726a07eaee105988d975a31657665" exitCode=0 Jan 27 12:50:56 crc kubenswrapper[4900]: I0127 12:50:56.562352 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8f2g8" event={"ID":"819e890f-4f20-4b02-b41d-88cc6da2dee3","Type":"ContainerDied","Data":"9de16403e647a789a56826b863a8d06a397726a07eaee105988d975a31657665"} Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.009680 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.017492 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056047 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-combined-ca-bundle\") pod \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056105 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-combined-ca-bundle\") pod \"f23369b5-8053-430d-a8cb-af18fa1b976d\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056136 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnt9z\" (UniqueName: \"kubernetes.io/projected/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-kube-api-access-pnt9z\") pod \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056180 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-scripts\") pod \"f23369b5-8053-430d-a8cb-af18fa1b976d\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056200 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-httpd-run\") pod \"f23369b5-8053-430d-a8cb-af18fa1b976d\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056224 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtpwz\" (UniqueName: \"kubernetes.io/projected/f23369b5-8053-430d-a8cb-af18fa1b976d-kube-api-access-qtpwz\") pod \"f23369b5-8053-430d-a8cb-af18fa1b976d\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056254 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-internal-tls-certs\") pod \"f23369b5-8053-430d-a8cb-af18fa1b976d\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056294 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-config-data\") pod \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056331 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-scripts\") pod \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056374 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-logs\") pod \"f23369b5-8053-430d-a8cb-af18fa1b976d\" (UID: 
\"f23369b5-8053-430d-a8cb-af18fa1b976d\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056469 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"f23369b5-8053-430d-a8cb-af18fa1b976d\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056495 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-logs\") pod \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056513 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-config-data\") pod \"f23369b5-8053-430d-a8cb-af18fa1b976d\" (UID: \"f23369b5-8053-430d-a8cb-af18fa1b976d\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056575 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056596 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-httpd-run\") pod \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.056617 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-public-tls-certs\") pod \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\" (UID: \"b9988606-d1b6-4ef7-acd1-a73f41f1dc92\") " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.058506 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-logs" (OuterVolumeSpecName: "logs") pod "f23369b5-8053-430d-a8cb-af18fa1b976d" (UID: "f23369b5-8053-430d-a8cb-af18fa1b976d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.059101 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b9988606-d1b6-4ef7-acd1-a73f41f1dc92" (UID: "b9988606-d1b6-4ef7-acd1-a73f41f1dc92"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.059661 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-logs" (OuterVolumeSpecName: "logs") pod "b9988606-d1b6-4ef7-acd1-a73f41f1dc92" (UID: "b9988606-d1b6-4ef7-acd1-a73f41f1dc92"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.059962 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f23369b5-8053-430d-a8cb-af18fa1b976d" (UID: "f23369b5-8053-430d-a8cb-af18fa1b976d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.154852 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-kube-api-access-pnt9z" (OuterVolumeSpecName: "kube-api-access-pnt9z") pod "b9988606-d1b6-4ef7-acd1-a73f41f1dc92" (UID: "b9988606-d1b6-4ef7-acd1-a73f41f1dc92"). InnerVolumeSpecName "kube-api-access-pnt9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.156312 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f23369b5-8053-430d-a8cb-af18fa1b976d-kube-api-access-qtpwz" (OuterVolumeSpecName: "kube-api-access-qtpwz") pod "f23369b5-8053-430d-a8cb-af18fa1b976d" (UID: "f23369b5-8053-430d-a8cb-af18fa1b976d"). InnerVolumeSpecName "kube-api-access-qtpwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.158594 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtpwz\" (UniqueName: \"kubernetes.io/projected/f23369b5-8053-430d-a8cb-af18fa1b976d-kube-api-access-qtpwz\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.158621 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.158632 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.158639 4900 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.158650 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnt9z\" (UniqueName: \"kubernetes.io/projected/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-kube-api-access-pnt9z\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.158658 4900 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f23369b5-8053-430d-a8cb-af18fa1b976d-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.167941 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-scripts" (OuterVolumeSpecName: "scripts") pod "b9988606-d1b6-4ef7-acd1-a73f41f1dc92" (UID: "b9988606-d1b6-4ef7-acd1-a73f41f1dc92"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.168084 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-scripts" (OuterVolumeSpecName: "scripts") pod "f23369b5-8053-430d-a8cb-af18fa1b976d" (UID: "f23369b5-8053-430d-a8cb-af18fa1b976d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.259920 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.260745 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.292235 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f23369b5-8053-430d-a8cb-af18fa1b976d" (UID: "f23369b5-8053-430d-a8cb-af18fa1b976d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.354262 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b9988606-d1b6-4ef7-acd1-a73f41f1dc92" (UID: "b9988606-d1b6-4ef7-acd1-a73f41f1dc92"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.362649 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.362681 4900 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.370080 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b" (OuterVolumeSpecName: "glance") pod "b9988606-d1b6-4ef7-acd1-a73f41f1dc92" (UID: "b9988606-d1b6-4ef7-acd1-a73f41f1dc92"). InnerVolumeSpecName "pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.373448 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33" (OuterVolumeSpecName: "glance") pod "f23369b5-8053-430d-a8cb-af18fa1b976d" (UID: "f23369b5-8053-430d-a8cb-af18fa1b976d"). InnerVolumeSpecName "pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.383377 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9988606-d1b6-4ef7-acd1-a73f41f1dc92" (UID: "b9988606-d1b6-4ef7-acd1-a73f41f1dc92"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.383378 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-config-data" (OuterVolumeSpecName: "config-data") pod "f23369b5-8053-430d-a8cb-af18fa1b976d" (UID: "f23369b5-8053-430d-a8cb-af18fa1b976d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.390835 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f23369b5-8053-430d-a8cb-af18fa1b976d" (UID: "f23369b5-8053-430d-a8cb-af18fa1b976d"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.398525 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-config-data" (OuterVolumeSpecName: "config-data") pod "b9988606-d1b6-4ef7-acd1-a73f41f1dc92" (UID: "b9988606-d1b6-4ef7-acd1-a73f41f1dc92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.466312 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.466418 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") on node \"crc\" " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.466443 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.466457 4900 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f23369b5-8053-430d-a8cb-af18fa1b976d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.466470 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9988606-d1b6-4ef7-acd1-a73f41f1dc92-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.466502 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") on node \"crc\" " Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 
12:50:57.506679 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.507108 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b") on node "crc" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.540985 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.541311 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33") on node "crc" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.569180 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.569234 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") on node \"crc\" DevicePath \"\"" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.581314 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f23369b5-8053-430d-a8cb-af18fa1b976d","Type":"ContainerDied","Data":"fb0d531c6d33a82f23ebb11a2c2379aac208f1ccc1cebb5292c3ba8b41262d89"} Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.581391 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.581412 4900 scope.go:117] "RemoveContainer" containerID="acf5ac4cb56c0da001f79c933061459c23ecd037059ff68499ea6413d47fc9ea" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.584979 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.586619 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b9988606-d1b6-4ef7-acd1-a73f41f1dc92","Type":"ContainerDied","Data":"20e73e44c1f24e748abed0883fa6080671dc572403d65928b6eaa2bcdac499ad"} Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.660482 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.695435 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.702310 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.748521 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.776375 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:50:57 crc kubenswrapper[4900]: E0127 12:50:57.777228 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerName="glance-log" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777251 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerName="glance-log" Jan 27 12:50:57 crc kubenswrapper[4900]: E0127 12:50:57.777278 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerName="glance-log" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777286 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerName="glance-log" Jan 27 12:50:57 crc kubenswrapper[4900]: E0127 12:50:57.777315 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa7737bb-7773-450c-9fe8-f5e27d501de8" containerName="init" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777324 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa7737bb-7773-450c-9fe8-f5e27d501de8" containerName="init" Jan 27 12:50:57 crc kubenswrapper[4900]: E0127 12:50:57.777342 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerName="glance-httpd" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777349 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerName="glance-httpd" Jan 27 12:50:57 crc kubenswrapper[4900]: E0127 12:50:57.777364 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerName="glance-httpd" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777374 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerName="glance-httpd" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777663 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerName="glance-httpd" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777679 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerName="glance-httpd" Jan 27 12:50:57 
crc kubenswrapper[4900]: I0127 12:50:57.777698 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" containerName="glance-log" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777715 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa7737bb-7773-450c-9fe8-f5e27d501de8" containerName="init" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.777726 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" containerName="glance-log" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.779467 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.783907 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.783921 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-6vqzq" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.784329 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.784739 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.808645 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.821106 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.827729 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.831629 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.831880 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.835953 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.888691 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.888865 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-logs\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.888908 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.888960 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.889024 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f84rj\" (UniqueName: \"kubernetes.io/projected/0be8476d-4715-4bb9-80da-51be3e2a13f5-kube-api-access-f84rj\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.889071 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.889245 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.889291 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.992730 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.994303 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.994467 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f84rj\" (UniqueName: \"kubernetes.io/projected/0be8476d-4715-4bb9-80da-51be3e2a13f5-kube-api-access-f84rj\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.994505 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.994554 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.994578 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.994806 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.994975 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " 
pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.995046 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.995111 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-scripts\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.995175 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:57 crc kubenswrapper[4900]: I0127 12:50:57.995203 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-logs\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.001302 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.002840 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.004100 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.009495 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.016219 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 
12:50:58.016954 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-logs\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.018303 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-logs\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.018380 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-config-data\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.018458 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.018517 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhmgv\" (UniqueName: \"kubernetes.io/projected/a5b99b87-204a-4232-a935-ca645a00f906-kube-api-access-hhmgv\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0" Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.040258 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
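[annotation, not part of the original log] The csi_attacher.go lines above and below show the attacher skipping the device-level stage/unstage step whenever the CSI driver does not advertise the STAGE_UNSTAGE_VOLUME node capability; the operation is then reported as succeeded without any gRPC call. The Go sketch below is illustrative only, with hypothetical types rather than kubelet's actual code, of that capability gate:

// capability_gate.go: a minimal sketch of the STAGE_UNSTAGE_VOLUME gate.
// All names here are hypothetical; they only mirror the log's behaviour.
package main

import "fmt"

// nodeCapability stands in for the CSI NodeServiceCapability enum.
type nodeCapability string

const stageUnstageVolume nodeCapability = "STAGE_UNSTAGE_VOLUME"

// driver models only what the attacher needs: the advertised capabilities.
type driver struct {
	name string
	caps map[nodeCapability]bool
}

// unmountDevice mirrors the early return logged by csi_attacher.go: if the
// plugin never staged the volume at a node-global path, there is nothing to
// unstage, so the device operation is skipped and still counts as success.
func unmountDevice(d driver, volumeID string) error {
	if !d.caps[stageUnstageVolume] {
		fmt.Printf("%s: STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice for %s\n", d.name, volumeID)
		return nil
	}
	// A real attacher would issue a NodeUnstageVolume gRPC call here.
	fmt.Printf("%s: NodeUnstageVolume(%s)\n", d.name, volumeID)
	return nil
}

func main() {
	// kubevirt.io.hostpath-provisioner, as seen in this log, advertises no
	// stage/unstage support, so the gate always takes the skip branch.
	hostpath := driver{name: "kubevirt.io.hostpath-provisioner", caps: map[nodeCapability]bool{}}
	_ = unmountDevice(hostpath, "pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b")
}

This is why each "Skipping UnmountDevice..."/"Skipping MountDevice..." line in the log is immediately paired with an "UnmountDevice succeeded"/"MountDevice succeeded" entry for the same PVC.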
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.040313 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/4b03b1e0d87c93578458a398975bf619ff063e17c81725aa528a6bf239df8b6c/globalmount\"" pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.049730 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f84rj\" (UniqueName: \"kubernetes.io/projected/0be8476d-4715-4bb9-80da-51be3e2a13f5-kube-api-access-f84rj\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.131288 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.131665 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.131737 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.133613 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.134085 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-scripts\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.134182 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-logs\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.134226 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-config-data\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.134299 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhmgv\" (UniqueName: \"kubernetes.io/projected/a5b99b87-204a-4232-a935-ca645a00f906-kube-api-access-hhmgv\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.136540 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-logs\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.136839 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.168598 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.169261 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.171167 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-scripts\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.171604 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-config-data\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.173902 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.173967 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/95185e520e2875ca4693f40e4e3857c43a9e41c071319588428e729bb9badc1a/globalmount\"" pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.178658 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.189913 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhmgv\" (UniqueName: \"kubernetes.io/projected/a5b99b87-204a-4232-a935-ca645a00f906-kube-api-access-hhmgv\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.249191 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " pod="openstack/glance-default-external-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.422296 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.516932 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9988606-d1b6-4ef7-acd1-a73f41f1dc92" path="/var/lib/kubelet/pods/b9988606-d1b6-4ef7-acd1-a73f41f1dc92/volumes"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.519477 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f23369b5-8053-430d-a8cb-af18fa1b976d" path="/var/lib/kubelet/pods/f23369b5-8053-430d-a8cb-af18fa1b976d/volumes"
Jan 27 12:50:58 crc kubenswrapper[4900]: I0127 12:50:58.522710 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.296721 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8f2g8"
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.483759 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-fernet-keys\") pod \"819e890f-4f20-4b02-b41d-88cc6da2dee3\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") "
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.485031 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-combined-ca-bundle\") pod \"819e890f-4f20-4b02-b41d-88cc6da2dee3\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") "
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.485261 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-config-data\") pod \"819e890f-4f20-4b02-b41d-88cc6da2dee3\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") "
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.485481 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-credential-keys\") pod \"819e890f-4f20-4b02-b41d-88cc6da2dee3\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") "
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.485565 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fh9t5\" (UniqueName: \"kubernetes.io/projected/819e890f-4f20-4b02-b41d-88cc6da2dee3-kube-api-access-fh9t5\") pod \"819e890f-4f20-4b02-b41d-88cc6da2dee3\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") "
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.485618 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-scripts\") pod \"819e890f-4f20-4b02-b41d-88cc6da2dee3\" (UID: \"819e890f-4f20-4b02-b41d-88cc6da2dee3\") "
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.510006 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/819e890f-4f20-4b02-b41d-88cc6da2dee3-kube-api-access-fh9t5" (OuterVolumeSpecName: "kube-api-access-fh9t5") pod "819e890f-4f20-4b02-b41d-88cc6da2dee3" (UID: "819e890f-4f20-4b02-b41d-88cc6da2dee3"). InnerVolumeSpecName "kube-api-access-fh9t5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.510968 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "819e890f-4f20-4b02-b41d-88cc6da2dee3" (UID: "819e890f-4f20-4b02-b41d-88cc6da2dee3"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.518306 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "819e890f-4f20-4b02-b41d-88cc6da2dee3" (UID: "819e890f-4f20-4b02-b41d-88cc6da2dee3"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.518470 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-scripts" (OuterVolumeSpecName: "scripts") pod "819e890f-4f20-4b02-b41d-88cc6da2dee3" (UID: "819e890f-4f20-4b02-b41d-88cc6da2dee3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.521588 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "819e890f-4f20-4b02-b41d-88cc6da2dee3" (UID: "819e890f-4f20-4b02-b41d-88cc6da2dee3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.534909 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-config-data" (OuterVolumeSpecName: "config-data") pod "819e890f-4f20-4b02-b41d-88cc6da2dee3" (UID: "819e890f-4f20-4b02-b41d-88cc6da2dee3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.588585 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.588624 4900 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.588637 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fh9t5\" (UniqueName: \"kubernetes.io/projected/819e890f-4f20-4b02-b41d-88cc6da2dee3-kube-api-access-fh9t5\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.588646 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-scripts\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.588657 4900 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.588670 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/819e890f-4f20-4b02-b41d-88cc6da2dee3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:02 crc kubenswrapper[4900]: E0127 12:51:02.633556 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-container:current-podified"
Jan 27 12:51:02 crc kubenswrapper[4900]: E0127 12:51:02.633773 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-server,Image:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,Command:[/usr/bin/swift-container-server /etc/swift/container-server.conf.d -v],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:container,HostPort:0,ContainerPort:6201,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n56ch684h8ch8bh98hbfh4h8fh5f6h5dbh5dch58h575h59dh59h5cdh95h586h554h675h5cbh69h5cch58bh89h67bhd8hcchf7h574h644h668q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:swift,ReadOnly:false,MountPath:/srv/node/pv,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cache,ReadOnly:false,MountPath:/var/cache/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lock,ReadOnly:false,MountPath:/var/lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bk4m9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-storage-0_openstack(0c2f90a4-baa0-4eeb-a797-3664c306818b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.660674 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8f2g8" event={"ID":"819e890f-4f20-4b02-b41d-88cc6da2dee3","Type":"ContainerDied","Data":"e265ee93f2cc27c6d80ec0a7bed100aae04558d597b6533c72b328af5de46406"}
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.660717 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e265ee93f2cc27c6d80ec0a7bed100aae04558d597b6533c72b328af5de46406"
Jan 27 12:51:02 crc kubenswrapper[4900]: I0127 12:51:02.660770 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8f2g8"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.524877 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8f2g8"]
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.539398 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8f2g8"]
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.671139 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-6hsbc"]
Jan 27 12:51:03 crc kubenswrapper[4900]: E0127 12:51:03.672009 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="819e890f-4f20-4b02-b41d-88cc6da2dee3" containerName="keystone-bootstrap"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.672036 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="819e890f-4f20-4b02-b41d-88cc6da2dee3" containerName="keystone-bootstrap"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.672305 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="819e890f-4f20-4b02-b41d-88cc6da2dee3" containerName="keystone-bootstrap"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.673221 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.685690 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.686540 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.686735 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k2fbf"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.687180 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.687188 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.743043 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-credential-keys\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.743137 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-scripts\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.743163 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5x7g\" (UniqueName: \"kubernetes.io/projected/551282ea-7408-404c-9128-cfa4f06089f3-kube-api-access-r5x7g\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.743196 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-combined-ca-bundle\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.743253 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-config-data\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.743282 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-fernet-keys\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.749017 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6hsbc"]
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.845628 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-scripts\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.845686 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5x7g\" (UniqueName: \"kubernetes.io/projected/551282ea-7408-404c-9128-cfa4f06089f3-kube-api-access-r5x7g\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.845731 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-combined-ca-bundle\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.845784 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-config-data\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.845811 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-fernet-keys\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.845932 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-credential-keys\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.870082 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-fernet-keys\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.872458 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-scripts\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.875162 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-credential-keys\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.884134 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-combined-ca-bundle\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.896809 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-config-data\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:03 crc kubenswrapper[4900]: I0127 12:51:03.915790 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5x7g\" (UniqueName: \"kubernetes.io/projected/551282ea-7408-404c-9128-cfa4f06089f3-kube-api-access-r5x7g\") pod \"keystone-bootstrap-6hsbc\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:04 crc kubenswrapper[4900]: I0127 12:51:04.043444 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6hsbc"
Jan 27 12:51:04 crc kubenswrapper[4900]: I0127 12:51:04.510091 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="819e890f-4f20-4b02-b41d-88cc6da2dee3" path="/var/lib/kubelet/pods/819e890f-4f20-4b02-b41d-88cc6da2dee3/volumes"
Jan 27 12:51:06 crc kubenswrapper[4900]: I0127 12:51:06.081528 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-z8pb8" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.149:5353: i/o timeout"
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.637961 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-z8pb8"
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.784874 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-nb\") pod \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") "
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.784953 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-config\") pod \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") "
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.785289 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-sb\") pod \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") "
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.785575 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phtbq\" (UniqueName: \"kubernetes.io/projected/67c4919e-b3dc-47ea-8728-03d0aaf07c18-kube-api-access-phtbq\") pod \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") "
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.785652 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-dns-svc\") pod \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\" (UID: \"67c4919e-b3dc-47ea-8728-03d0aaf07c18\") "
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.804591 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-z8pb8" event={"ID":"67c4919e-b3dc-47ea-8728-03d0aaf07c18","Type":"ContainerDied","Data":"880ba9dfb2dee180901fcb107a8eb785b54a213ba94f79df835d0f643abf9c45"}
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.804823 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-z8pb8"
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.886476 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67c4919e-b3dc-47ea-8728-03d0aaf07c18-kube-api-access-phtbq" (OuterVolumeSpecName: "kube-api-access-phtbq") pod "67c4919e-b3dc-47ea-8728-03d0aaf07c18" (UID: "67c4919e-b3dc-47ea-8728-03d0aaf07c18"). InnerVolumeSpecName "kube-api-access-phtbq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.901774 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phtbq\" (UniqueName: \"kubernetes.io/projected/67c4919e-b3dc-47ea-8728-03d0aaf07c18-kube-api-access-phtbq\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.939989 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-config" (OuterVolumeSpecName: "config") pod "67c4919e-b3dc-47ea-8728-03d0aaf07c18" (UID: "67c4919e-b3dc-47ea-8728-03d0aaf07c18"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.951496 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "67c4919e-b3dc-47ea-8728-03d0aaf07c18" (UID: "67c4919e-b3dc-47ea-8728-03d0aaf07c18"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.961021 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "67c4919e-b3dc-47ea-8728-03d0aaf07c18" (UID: "67c4919e-b3dc-47ea-8728-03d0aaf07c18"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:51:07 crc kubenswrapper[4900]: I0127 12:51:07.962390 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "67c4919e-b3dc-47ea-8728-03d0aaf07c18" (UID: "67c4919e-b3dc-47ea-8728-03d0aaf07c18"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:51:08 crc kubenswrapper[4900]: I0127 12:51:08.015128 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:08 crc kubenswrapper[4900]: I0127 12:51:08.015168 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:08 crc kubenswrapper[4900]: I0127 12:51:08.015178 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:08 crc kubenswrapper[4900]: I0127 12:51:08.015186 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67c4919e-b3dc-47ea-8728-03d0aaf07c18-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 27 12:51:08 crc kubenswrapper[4900]: I0127 12:51:08.159330 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-z8pb8"]
Jan 27 12:51:08 crc kubenswrapper[4900]: I0127 12:51:08.171120 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-z8pb8"]
Jan 27 12:51:08 crc kubenswrapper[4900]: I0127 12:51:08.494288 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" path="/var/lib/kubelet/pods/67c4919e-b3dc-47ea-8728-03d0aaf07c18/volumes"
Jan 27 12:51:11 crc kubenswrapper[4900]: I0127 12:51:11.082622 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-z8pb8" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.149:5353: i/o timeout"
Jan 27 12:51:18 crc kubenswrapper[4900]: I0127 12:51:18.994408 4900 scope.go:117] "RemoveContainer" containerID="be405de2e5815a7f4fadc67d0227ef27e616baa4ba588a0f8533a32667fc5fa9"
Jan 27 12:51:19 crc kubenswrapper[4900]: E0127 12:51:19.073551 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified"
Jan 27 12:51:19 crc kubenswrapper[4900]: E0127 12:51:19.073747 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fnfxx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nwqdh_openstack(b5375696-4614-47d4-a8aa-2a98bdd0bd17): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 27 12:51:19 crc kubenswrapper[4900]: E0127 12:51:19.074984 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-nwqdh" podUID="b5375696-4614-47d4-a8aa-2a98bdd0bd17"
Jan 27 12:51:19 crc kubenswrapper[4900]: E0127 12:51:19.518630 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Jan 27 12:51:19 crc kubenswrapper[4900]: E0127 12:51:19.518806 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n54ch54h657h7chbdh58dhb8h664hdhb7h64ch86h57h654h5c7hd6h687h5d6h674h76h88h5fch564hc4h5b6h5c8hf7h648hf9hc9h678h5c6q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nczbn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 27 12:51:19 crc kubenswrapper[4900]: I0127 12:51:19.976034 4900 generic.go:334] "Generic (PLEG): container finished" podID="f3932d13-5d69-455f-88db-3978da7a8c00" containerID="2cdcc5f6b97f6d2c57e295057c54e9dbf3f2c114cb50cf9a4ad28c5343df0fbd" exitCode=0
Jan 27 12:51:19 crc kubenswrapper[4900]: I0127 12:51:19.976097 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d4wfs" event={"ID":"f3932d13-5d69-455f-88db-3978da7a8c00","Type":"ContainerDied","Data":"2cdcc5f6b97f6d2c57e295057c54e9dbf3f2c114cb50cf9a4ad28c5343df0fbd"}
Jan 27 12:51:19 crc kubenswrapper[4900]: E0127 12:51:19.982389 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-nwqdh" podUID="b5375696-4614-47d4-a8aa-2a98bdd0bd17"
Jan 27 12:51:20 crc kubenswrapper[4900]: E0127 12:51:20.785891 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Jan 27 12:51:20 crc kubenswrapper[4900]: E0127 12:51:20.786141 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j4hps,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-2rf6d_openstack(f3331ea7-d796-459a-9e9d-6f744ba8822b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 27 12:51:20 crc kubenswrapper[4900]: E0127 12:51:20.787764 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-2rf6d" podUID="f3331ea7-d796-459a-9e9d-6f744ba8822b"
Jan 27 12:51:20 crc kubenswrapper[4900]: E0127 12:51:20.997852 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-2rf6d" podUID="f3331ea7-d796-459a-9e9d-6f744ba8822b"
Jan 27 12:51:21 crc 
kubenswrapper[4900]: E0127 12:51:21.225681 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-object:current-podified" Jan 27 12:51:21 crc kubenswrapper[4900]: E0127 12:51:21.225900 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:object-server,Image:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,Command:[/usr/bin/swift-object-server /etc/swift/object-server.conf.d -v],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:object,HostPort:0,ContainerPort:6200,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n56ch684h8ch8bh98hbfh4h8fh5f6h5dbh5dch58h575h59dh59h5cdh95h586h554h675h5cbh69h5cch58bh89h67bhd8hcchf7h574h644h668q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:swift,ReadOnly:false,MountPath:/srv/node/pv,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cache,ReadOnly:false,MountPath:/var/cache/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lock,ReadOnly:false,MountPath:/var/lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bk4m9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-storage-0_openstack(0c2f90a4-baa0-4eeb-a797-3664c306818b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:51:21 crc kubenswrapper[4900]: I0127 12:51:21.281271 4900 scope.go:117] "RemoveContainer" containerID="7fad735114113704d286c75bebec0ec12fd47d79383ab86e1e6481a77b1abca8" Jan 27 12:51:21 crc kubenswrapper[4900]: I0127 12:51:21.317843 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:51:21 crc kubenswrapper[4900]: I0127 12:51:21.776330 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6hsbc"] Jan 27 12:51:21 crc kubenswrapper[4900]: I0127 12:51:21.825170 4900 scope.go:117] "RemoveContainer" containerID="f5dcf7b70b726d88aa89919739136875cc1f1c9073d4a7f1469b0e8c522ba804" Jan 27 12:51:21 crc kubenswrapper[4900]: I0127 12:51:21.889823 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 27 12:51:21 crc 
kubenswrapper[4900]: I0127 12:51:21.988809 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.030738 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4kmq8" event={"ID":"a5c58344-109f-4fd1-948c-39ef56d4b0eb","Type":"ContainerStarted","Data":"a4174ce3a965df25a117d4db3cc9b3b623246141a958b9c57fb41c4814dfcffb"} Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.034722 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b99b87-204a-4232-a935-ca645a00f906","Type":"ContainerStarted","Data":"a8c98ad8358c5bc0ae03d32179d2006e5e6d6c52bd2048141e20c057c18ff0d9"} Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.038296 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6hsbc" event={"ID":"551282ea-7408-404c-9128-cfa4f06089f3","Type":"ContainerStarted","Data":"d71e80ea11fc30da8bb447bd2f46ae629cc687699923091027ee7430efb4696d"} Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.039419 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d4wfs" event={"ID":"f3932d13-5d69-455f-88db-3978da7a8c00","Type":"ContainerDied","Data":"24fe9718e9288540c5fb3dd61e4f915bc8116944fc67d34a84d0242c89169c8e"} Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.039446 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24fe9718e9288540c5fb3dd61e4f915bc8116944fc67d34a84d0242c89169c8e" Jan 27 12:51:22 crc kubenswrapper[4900]: W0127 12:51:22.039861 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0be8476d_4715_4bb9_80da_51be3e2a13f5.slice/crio-288894f3d127df23197c00b8967b257b7a3e9d73a8bd6c2a8c05334a6b23857e WatchSource:0}: Error finding container 288894f3d127df23197c00b8967b257b7a3e9d73a8bd6c2a8c05334a6b23857e: Status 404 returned error can't find the container with id 288894f3d127df23197c00b8967b257b7a3e9d73a8bd6c2a8c05334a6b23857e Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.096283 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-4kmq8" podStartSLOduration=8.16349703 podStartE2EDuration="39.096258061s" podCreationTimestamp="2026-01-27 12:50:43 +0000 UTC" firstStartedPulling="2026-01-27 12:50:48.048739081 +0000 UTC m=+1475.285767291" lastFinishedPulling="2026-01-27 12:51:18.981500112 +0000 UTC m=+1506.218528322" observedRunningTime="2026-01-27 12:51:22.049172119 +0000 UTC m=+1509.286200319" watchObservedRunningTime="2026-01-27 12:51:22.096258061 +0000 UTC m=+1509.333286271" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.121410 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.143296 4900 scope.go:117] "RemoveContainer" containerID="bd25891da5f2894b1dc28a7be89eaefdea2527622d3e2a18e60d2e172cbac996" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.201431 4900 scope.go:117] "RemoveContainer" containerID="07924ff35100f3b5d21779c1abba6f98705d9382bf52fb1b4961abf725123e2d" Jan 27 12:51:22 crc kubenswrapper[4900]: E0127 12:51:22.251928 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"object-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"object-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"rsync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"swift-recon-cron\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="0c2f90a4-baa0-4eeb-a797-3664c306818b" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.306204 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-combined-ca-bundle\") pod \"f3932d13-5d69-455f-88db-3978da7a8c00\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.306341 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-config\") pod \"f3932d13-5d69-455f-88db-3978da7a8c00\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.306440 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92j94\" (UniqueName: \"kubernetes.io/projected/f3932d13-5d69-455f-88db-3978da7a8c00-kube-api-access-92j94\") pod \"f3932d13-5d69-455f-88db-3978da7a8c00\" (UID: \"f3932d13-5d69-455f-88db-3978da7a8c00\") " Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.317708 4900 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3932d13-5d69-455f-88db-3978da7a8c00-kube-api-access-92j94" (OuterVolumeSpecName: "kube-api-access-92j94") pod "f3932d13-5d69-455f-88db-3978da7a8c00" (UID: "f3932d13-5d69-455f-88db-3978da7a8c00"). InnerVolumeSpecName "kube-api-access-92j94". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.343235 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-config" (OuterVolumeSpecName: "config") pod "f3932d13-5d69-455f-88db-3978da7a8c00" (UID: "f3932d13-5d69-455f-88db-3978da7a8c00"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.344824 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3932d13-5d69-455f-88db-3978da7a8c00" (UID: "f3932d13-5d69-455f-88db-3978da7a8c00"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.410197 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92j94\" (UniqueName: \"kubernetes.io/projected/f3932d13-5d69-455f-88db-3978da7a8c00-kube-api-access-92j94\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.410241 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:22 crc kubenswrapper[4900]: I0127 12:51:22.410253 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/f3932d13-5d69-455f-88db-3978da7a8c00-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.067655 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0be8476d-4715-4bb9-80da-51be3e2a13f5","Type":"ContainerStarted","Data":"a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3"} Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.067727 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0be8476d-4715-4bb9-80da-51be3e2a13f5","Type":"ContainerStarted","Data":"288894f3d127df23197c00b8967b257b7a3e9d73a8bd6c2a8c05334a6b23857e"} Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.095095 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"2f91f1cd003c148ac1492cebf5485cd09a47b1843fe3f94055162d19c3f4779e"} Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.103813 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed","Type":"ContainerStarted","Data":"e4682ccb7862290593bfb7000b3d33b671743bff45ecae501e8cf40a8eeaaa86"} Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.106825 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6hsbc" 
event={"ID":"551282ea-7408-404c-9128-cfa4f06089f3","Type":"ContainerStarted","Data":"8a64a45b84d96da73c53d0f60621fde345c4d6a741e8167c70565c4c3b743b9d"} Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.109474 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dr6wx" event={"ID":"ed853af8-ff05-4908-a82f-deceefe54dad","Type":"ContainerStarted","Data":"0532c7a538ec809066d2d2fdc38769e51f4471fad40e2ed23c77bd481b51d99f"} Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.120376 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b99b87-204a-4232-a935-ca645a00f906","Type":"ContainerStarted","Data":"d08b67263191daaa6407ee3acf61da67d89289f11204efe1ffe8614bbbfc3b02"} Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.126465 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d4wfs" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.236886 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-dr6wx" podStartSLOduration=7.481599962 podStartE2EDuration="40.236856752s" podCreationTimestamp="2026-01-27 12:50:43 +0000 UTC" firstStartedPulling="2026-01-27 12:50:47.980082229 +0000 UTC m=+1475.217110439" lastFinishedPulling="2026-01-27 12:51:20.735339019 +0000 UTC m=+1507.972367229" observedRunningTime="2026-01-27 12:51:23.186329639 +0000 UTC m=+1510.423357849" watchObservedRunningTime="2026-01-27 12:51:23.236856752 +0000 UTC m=+1510.473884962" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.241485 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-6hsbc" podStartSLOduration=20.241467436 podStartE2EDuration="20.241467436s" podCreationTimestamp="2026-01-27 12:51:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:23.225754418 +0000 UTC m=+1510.462782638" watchObservedRunningTime="2026-01-27 12:51:23.241467436 +0000 UTC m=+1510.478495646" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.405347 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fb745b69-qbm9l"] Jan 27 12:51:23 crc kubenswrapper[4900]: E0127 12:51:23.408844 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3932d13-5d69-455f-88db-3978da7a8c00" containerName="neutron-db-sync" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.408894 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3932d13-5d69-455f-88db-3978da7a8c00" containerName="neutron-db-sync" Jan 27 12:51:23 crc kubenswrapper[4900]: E0127 12:51:23.408921 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="init" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.408927 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="init" Jan 27 12:51:23 crc kubenswrapper[4900]: E0127 12:51:23.408936 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="dnsmasq-dns" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.408943 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="dnsmasq-dns" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.409894 4900 
memory_manager.go:354] "RemoveStaleState removing state" podUID="f3932d13-5d69-455f-88db-3978da7a8c00" containerName="neutron-db-sync" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.410010 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="67c4919e-b3dc-47ea-8728-03d0aaf07c18" containerName="dnsmasq-dns" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.411325 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.465422 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-qbm9l"] Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.484193 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5bf6b97d5b-nvggn"] Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.486884 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.490789 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.490809 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.491268 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.501630 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-55hcd" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.507530 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5bf6b97d5b-nvggn"] Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.553000 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-dns-svc\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.553096 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6wpb\" (UniqueName: \"kubernetes.io/projected/47942ee1-102a-42b0-a25a-e41b9a020d95-kube-api-access-c6wpb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.553150 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.553184 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.553260 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-config\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.658778 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wd4nt\" (UniqueName: \"kubernetes.io/projected/a5e02e88-5851-4d7a-93fe-39c857d7f2de-kube-api-access-wd4nt\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659268 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-combined-ca-bundle\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659299 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-ovndb-tls-certs\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659479 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-dns-svc\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659531 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-config\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659556 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6wpb\" (UniqueName: \"kubernetes.io/projected/47942ee1-102a-42b0-a25a-e41b9a020d95-kube-api-access-c6wpb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659604 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659647 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659701 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-httpd-config\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.659757 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-config\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.660795 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-config\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.664904 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-dns-svc\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.665907 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.669634 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.740227 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6wpb\" (UniqueName: \"kubernetes.io/projected/47942ee1-102a-42b0-a25a-e41b9a020d95-kube-api-access-c6wpb\") pod \"dnsmasq-dns-fb745b69-qbm9l\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.749897 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.768973 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-config\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.769205 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-httpd-config\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.769302 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wd4nt\" (UniqueName: \"kubernetes.io/projected/a5e02e88-5851-4d7a-93fe-39c857d7f2de-kube-api-access-wd4nt\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.769396 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-combined-ca-bundle\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.769418 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-ovndb-tls-certs\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.777284 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-httpd-config\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.789318 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-ovndb-tls-certs\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.807644 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-config\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.808258 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-combined-ca-bundle\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.816598 4900 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wd4nt\" (UniqueName: \"kubernetes.io/projected/a5e02e88-5851-4d7a-93fe-39c857d7f2de-kube-api-access-wd4nt\") pod \"neutron-5bf6b97d5b-nvggn\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:23 crc kubenswrapper[4900]: I0127 12:51:23.830861 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:24 crc kubenswrapper[4900]: I0127 12:51:24.201460 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b99b87-204a-4232-a935-ca645a00f906","Type":"ContainerStarted","Data":"7e03f4d7c487c359b2d259c5a1eb3d5d58c6cc71dafe5b9cd2d871f47f658af0"} Jan 27 12:51:24 crc kubenswrapper[4900]: I0127 12:51:24.234950 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=27.234924477 podStartE2EDuration="27.234924477s" podCreationTimestamp="2026-01-27 12:50:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:24.225791381 +0000 UTC m=+1511.462819591" watchObservedRunningTime="2026-01-27 12:51:24.234924477 +0000 UTC m=+1511.471952677" Jan 27 12:51:24 crc kubenswrapper[4900]: I0127 12:51:24.841168 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-qbm9l"] Jan 27 12:51:25 crc kubenswrapper[4900]: I0127 12:51:25.237890 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"6ee86e611c26234ca0474ffa01be6a442af3355b1a9550edbf1250ebe35d3df6"} Jan 27 12:51:25 crc kubenswrapper[4900]: I0127 12:51:25.241637 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" event={"ID":"47942ee1-102a-42b0-a25a-e41b9a020d95","Type":"ContainerStarted","Data":"709bf15b1cdf3bd5e8051079d9f5cf35bc66044e13b4323a797e7c1fde4066a1"} Jan 27 12:51:25 crc kubenswrapper[4900]: I0127 12:51:25.244277 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0be8476d-4715-4bb9-80da-51be3e2a13f5","Type":"ContainerStarted","Data":"8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d"} Jan 27 12:51:25 crc kubenswrapper[4900]: I0127 12:51:25.269622 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=28.269596788 podStartE2EDuration="28.269596788s" podCreationTimestamp="2026-01-27 12:50:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:25.26829052 +0000 UTC m=+1512.505318740" watchObservedRunningTime="2026-01-27 12:51:25.269596788 +0000 UTC m=+1512.506624998" Jan 27 12:51:25 crc kubenswrapper[4900]: I0127 12:51:25.329477 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5bf6b97d5b-nvggn"] Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.319104 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bf6b97d5b-nvggn" event={"ID":"a5e02e88-5851-4d7a-93fe-39c857d7f2de","Type":"ContainerStarted","Data":"f0b725221906d9a65689350818bed6a5afc6d225bf2f300009ad498ceae59e57"} Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.319819 4900 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bf6b97d5b-nvggn" event={"ID":"a5e02e88-5851-4d7a-93fe-39c857d7f2de","Type":"ContainerStarted","Data":"14dcee93e5085057492fe5ac53fe19aa680d01e0b89cb447830ae9f60b8b8743"} Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.358788 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"f78741f6523488fb7aa0afae6cae41d3a70497f188310bb1807dadc19069f8d2"} Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.358854 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"e1854847df294fb95553bc05c5be859aeadd238414aa90c6deb481e64544c2ea"} Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.362771 4900 generic.go:334] "Generic (PLEG): container finished" podID="47942ee1-102a-42b0-a25a-e41b9a020d95" containerID="d426933f4f20c727c89c158c0835711132a2af9aa079afd56f846cb5683aa071" exitCode=0 Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.362929 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" event={"ID":"47942ee1-102a-42b0-a25a-e41b9a020d95","Type":"ContainerDied","Data":"d426933f4f20c727c89c158c0835711132a2af9aa079afd56f846cb5683aa071"} Jan 27 12:51:26 crc kubenswrapper[4900]: E0127 12:51:26.568317 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"object-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"rsync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"swift-recon-cron\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="0c2f90a4-baa0-4eeb-a797-3664c306818b" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.590252 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-65d58d75c7-pn4zc"] Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.592349 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.598596 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.598870 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.649597 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-65d58d75c7-pn4zc"] Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.713709 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-combined-ca-bundle\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.713832 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-httpd-config\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.713883 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-public-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.713904 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-config\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.713949 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-ovndb-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.714107 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-internal-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.714183 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zwdq\" (UniqueName: \"kubernetes.io/projected/58051d5f-ca34-40db-9b7c-da0d095fa129-kube-api-access-7zwdq\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.816013 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-internal-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.816164 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zwdq\" (UniqueName: \"kubernetes.io/projected/58051d5f-ca34-40db-9b7c-da0d095fa129-kube-api-access-7zwdq\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.816229 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-combined-ca-bundle\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.816319 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-httpd-config\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.816366 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-public-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.816391 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-config\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.816445 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-ovndb-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.824999 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-ovndb-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.827039 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-httpd-config\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.828748 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-config\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " 
pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.828741 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-internal-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.830273 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-public-tls-certs\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.831042 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-combined-ca-bundle\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.870837 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zwdq\" (UniqueName: \"kubernetes.io/projected/58051d5f-ca34-40db-9b7c-da0d095fa129-kube-api-access-7zwdq\") pod \"neutron-65d58d75c7-pn4zc\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:26 crc kubenswrapper[4900]: I0127 12:51:26.913696 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:27 crc kubenswrapper[4900]: I0127 12:51:27.382907 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bf6b97d5b-nvggn" event={"ID":"a5e02e88-5851-4d7a-93fe-39c857d7f2de","Type":"ContainerStarted","Data":"c57a1f196de92f004e5836fa5b3e9a73328a8ac375d79a67ea6ae82c79b0b3e7"} Jan 27 12:51:27 crc kubenswrapper[4900]: I0127 12:51:27.384245 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:27 crc kubenswrapper[4900]: I0127 12:51:27.417846 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"362b5c770b51dcfffedd466c4ae59d674e761320eb1079acfc69cb309de8f30e"} Jan 27 12:51:27 crc kubenswrapper[4900]: I0127 12:51:27.424406 4900 generic.go:334] "Generic (PLEG): container finished" podID="a5c58344-109f-4fd1-948c-39ef56d4b0eb" containerID="a4174ce3a965df25a117d4db3cc9b3b623246141a958b9c57fb41c4814dfcffb" exitCode=0 Jan 27 12:51:27 crc kubenswrapper[4900]: I0127 12:51:27.424462 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4kmq8" event={"ID":"a5c58344-109f-4fd1-948c-39ef56d4b0eb","Type":"ContainerDied","Data":"a4174ce3a965df25a117d4db3cc9b3b623246141a958b9c57fb41c4814dfcffb"} Jan 27 12:51:27 crc kubenswrapper[4900]: E0127 12:51:27.424704 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"object-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-replicator\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"rsync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"swift-recon-cron\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="0c2f90a4-baa0-4eeb-a797-3664c306818b" Jan 27 12:51:27 crc kubenswrapper[4900]: I0127 12:51:27.425233 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5bf6b97d5b-nvggn" podStartSLOduration=4.425216868 podStartE2EDuration="4.425216868s" podCreationTimestamp="2026-01-27 12:51:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:27.410413546 +0000 UTC m=+1514.647441776" watchObservedRunningTime="2026-01-27 12:51:27.425216868 +0000 UTC m=+1514.662245078" Jan 27 12:51:27 crc kubenswrapper[4900]: W0127 12:51:27.737510 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod58051d5f_ca34_40db_9b7c_da0d095fa129.slice/crio-892a25fdbbf96f8a19a5a35d81b533d21243c6f60fbf39d7c6fc8f3867d6f0bb WatchSource:0}: Error finding container 892a25fdbbf96f8a19a5a35d81b533d21243c6f60fbf39d7c6fc8f3867d6f0bb: Status 404 returned error can't find the container with id 892a25fdbbf96f8a19a5a35d81b533d21243c6f60fbf39d7c6fc8f3867d6f0bb Jan 27 12:51:27 crc kubenswrapper[4900]: I0127 12:51:27.738106 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-65d58d75c7-pn4zc"] Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.423859 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.425100 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.425117 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.425138 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.448141 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" event={"ID":"47942ee1-102a-42b0-a25a-e41b9a020d95","Type":"ContainerStarted","Data":"e657aeacbe8e5e63fbbf129c3ed6c2019d7448ad183596ce5fe4a231cfa6538c"} Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.448749 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.455752 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65d58d75c7-pn4zc" 
event={"ID":"58051d5f-ca34-40db-9b7c-da0d095fa129","Type":"ContainerStarted","Data":"b8e2928c7635575c4f71d0b5ddd38074992cfe3a7c9b6ae9ab9edfcda36442e3"} Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.456087 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65d58d75c7-pn4zc" event={"ID":"58051d5f-ca34-40db-9b7c-da0d095fa129","Type":"ContainerStarted","Data":"892a25fdbbf96f8a19a5a35d81b533d21243c6f60fbf39d7c6fc8f3867d6f0bb"} Jan 27 12:51:28 crc kubenswrapper[4900]: E0127 12:51:28.469491 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"object-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"object-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"rsync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\", failed to \"StartContainer\" for \"swift-recon-cron\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-object:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="0c2f90a4-baa0-4eeb-a797-3664c306818b" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.473595 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.490106 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" podStartSLOduration=5.49007494 podStartE2EDuration="5.49007494s" podCreationTimestamp="2026-01-27 12:51:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:28.470303704 +0000 UTC m=+1515.707331934" watchObservedRunningTime="2026-01-27 12:51:28.49007494 +0000 UTC m=+1515.727103150" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.517614 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.523558 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.523610 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.523623 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.523632 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.581323 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/glance-default-external-api-0" Jan 27 12:51:28 crc kubenswrapper[4900]: I0127 12:51:28.612587 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 27 12:51:30 crc kubenswrapper[4900]: I0127 12:51:30.533090 4900 generic.go:334] "Generic (PLEG): container finished" podID="ed853af8-ff05-4908-a82f-deceefe54dad" containerID="0532c7a538ec809066d2d2fdc38769e51f4471fad40e2ed23c77bd481b51d99f" exitCode=0 Jan 27 12:51:30 crc kubenswrapper[4900]: I0127 12:51:30.533430 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dr6wx" event={"ID":"ed853af8-ff05-4908-a82f-deceefe54dad","Type":"ContainerDied","Data":"0532c7a538ec809066d2d2fdc38769e51f4471fad40e2ed23c77bd481b51d99f"} Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.268414 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4kmq8" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.378533 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-scripts\") pod \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.378625 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-config-data\") pod \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.378721 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5c58344-109f-4fd1-948c-39ef56d4b0eb-logs\") pod \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.378869 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-combined-ca-bundle\") pod \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.378936 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfxdf\" (UniqueName: \"kubernetes.io/projected/a5c58344-109f-4fd1-948c-39ef56d4b0eb-kube-api-access-rfxdf\") pod \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\" (UID: \"a5c58344-109f-4fd1-948c-39ef56d4b0eb\") " Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.380164 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5c58344-109f-4fd1-948c-39ef56d4b0eb-logs" (OuterVolumeSpecName: "logs") pod "a5c58344-109f-4fd1-948c-39ef56d4b0eb" (UID: "a5c58344-109f-4fd1-948c-39ef56d4b0eb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.385261 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5c58344-109f-4fd1-948c-39ef56d4b0eb-kube-api-access-rfxdf" (OuterVolumeSpecName: "kube-api-access-rfxdf") pod "a5c58344-109f-4fd1-948c-39ef56d4b0eb" (UID: "a5c58344-109f-4fd1-948c-39ef56d4b0eb"). InnerVolumeSpecName "kube-api-access-rfxdf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.393946 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-scripts" (OuterVolumeSpecName: "scripts") pod "a5c58344-109f-4fd1-948c-39ef56d4b0eb" (UID: "a5c58344-109f-4fd1-948c-39ef56d4b0eb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.434308 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5c58344-109f-4fd1-948c-39ef56d4b0eb" (UID: "a5c58344-109f-4fd1-948c-39ef56d4b0eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.436035 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-config-data" (OuterVolumeSpecName: "config-data") pod "a5c58344-109f-4fd1-948c-39ef56d4b0eb" (UID: "a5c58344-109f-4fd1-948c-39ef56d4b0eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.483796 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5c58344-109f-4fd1-948c-39ef56d4b0eb-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.484122 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.484139 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfxdf\" (UniqueName: \"kubernetes.io/projected/a5c58344-109f-4fd1-948c-39ef56d4b0eb-kube-api-access-rfxdf\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.484149 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.484156 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5c58344-109f-4fd1-948c-39ef56d4b0eb-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.561437 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4kmq8" event={"ID":"a5c58344-109f-4fd1-948c-39ef56d4b0eb","Type":"ContainerDied","Data":"08eeeb7298ae337718e96c66b933c1383c59fc4430d48e34fa1d24d44a5b4cd6"} Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.561507 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08eeeb7298ae337718e96c66b933c1383c59fc4430d48e34fa1d24d44a5b4cd6" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.561599 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-4kmq8" Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.582347 4900 generic.go:334] "Generic (PLEG): container finished" podID="551282ea-7408-404c-9128-cfa4f06089f3" containerID="8a64a45b84d96da73c53d0f60621fde345c4d6a741e8167c70565c4c3b743b9d" exitCode=0 Jan 27 12:51:31 crc kubenswrapper[4900]: I0127 12:51:31.582414 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6hsbc" event={"ID":"551282ea-7408-404c-9128-cfa4f06089f3","Type":"ContainerDied","Data":"8a64a45b84d96da73c53d0f60621fde345c4d6a741e8167c70565c4c3b743b9d"} Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.424473 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-74d8686b8d-zh4mj"] Jan 27 12:51:32 crc kubenswrapper[4900]: E0127 12:51:32.425449 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5c58344-109f-4fd1-948c-39ef56d4b0eb" containerName="placement-db-sync" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.425467 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c58344-109f-4fd1-948c-39ef56d4b0eb" containerName="placement-db-sync" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.425695 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5c58344-109f-4fd1-948c-39ef56d4b0eb" containerName="placement-db-sync" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.427149 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.431687 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-b6zdc" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.431967 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.433379 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.434956 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.435671 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.474256 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-74d8686b8d-zh4mj"] Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.510599 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-combined-ca-bundle\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.510654 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-config-data\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.510713 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-public-tls-certs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.510891 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eec75b81-8153-4685-a1bc-826a5abed42b-logs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.511166 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-internal-tls-certs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.511452 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-scripts\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.511489 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwszc\" (UniqueName: \"kubernetes.io/projected/eec75b81-8153-4685-a1bc-826a5abed42b-kube-api-access-mwszc\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.614780 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-internal-tls-certs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.614950 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-scripts\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.614978 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwszc\" (UniqueName: \"kubernetes.io/projected/eec75b81-8153-4685-a1bc-826a5abed42b-kube-api-access-mwszc\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.615009 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-combined-ca-bundle\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.615031 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-config-data\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.615070 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-public-tls-certs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.615115 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eec75b81-8153-4685-a1bc-826a5abed42b-logs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.615624 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eec75b81-8153-4685-a1bc-826a5abed42b-logs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.631726 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-public-tls-certs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.633843 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-combined-ca-bundle\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.640444 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-scripts\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.642546 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-config-data\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.642670 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eec75b81-8153-4685-a1bc-826a5abed42b-internal-tls-certs\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.648644 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwszc\" (UniqueName: \"kubernetes.io/projected/eec75b81-8153-4685-a1bc-826a5abed42b-kube-api-access-mwszc\") pod \"placement-74d8686b8d-zh4mj\" (UID: \"eec75b81-8153-4685-a1bc-826a5abed42b\") " 
pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:32 crc kubenswrapper[4900]: I0127 12:51:32.755696 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:33 crc kubenswrapper[4900]: I0127 12:51:33.618197 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 27 12:51:33 crc kubenswrapper[4900]: I0127 12:51:33.619217 4900 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 12:51:33 crc kubenswrapper[4900]: I0127 12:51:33.623686 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 27 12:51:33 crc kubenswrapper[4900]: I0127 12:51:33.755488 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:51:33 crc kubenswrapper[4900]: I0127 12:51:33.969417 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-cgxdj"] Jan 27 12:51:33 crc kubenswrapper[4900]: I0127 12:51:33.969747 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" podUID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerName="dnsmasq-dns" containerID="cri-o://3e2b8649ad7da075231edd580a45c063bd7b1e428382c6f4505ce930a213049b" gracePeriod=10 Jan 27 12:51:33 crc kubenswrapper[4900]: I0127 12:51:33.978977 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 27 12:51:33 crc kubenswrapper[4900]: I0127 12:51:33.979101 4900 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 12:51:34 crc kubenswrapper[4900]: I0127 12:51:34.350996 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 27 12:51:34 crc kubenswrapper[4900]: I0127 12:51:34.597893 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" podUID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.183:5353: connect: connection refused" Jan 27 12:51:34 crc kubenswrapper[4900]: I0127 12:51:34.637617 4900 generic.go:334] "Generic (PLEG): container finished" podID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerID="3e2b8649ad7da075231edd580a45c063bd7b1e428382c6f4505ce930a213049b" exitCode=0 Jan 27 12:51:34 crc kubenswrapper[4900]: I0127 12:51:34.637688 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" event={"ID":"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7","Type":"ContainerDied","Data":"3e2b8649ad7da075231edd580a45c063bd7b1e428382c6f4505ce930a213049b"} Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.335511 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.350626 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-6hsbc" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.416649 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-config-data\") pod \"551282ea-7408-404c-9128-cfa4f06089f3\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.416725 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-credential-keys\") pod \"551282ea-7408-404c-9128-cfa4f06089f3\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.416793 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-scripts\") pod \"551282ea-7408-404c-9128-cfa4f06089f3\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.416852 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8c4pj\" (UniqueName: \"kubernetes.io/projected/ed853af8-ff05-4908-a82f-deceefe54dad-kube-api-access-8c4pj\") pod \"ed853af8-ff05-4908-a82f-deceefe54dad\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.416895 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-combined-ca-bundle\") pod \"ed853af8-ff05-4908-a82f-deceefe54dad\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.416922 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-db-sync-config-data\") pod \"ed853af8-ff05-4908-a82f-deceefe54dad\" (UID: \"ed853af8-ff05-4908-a82f-deceefe54dad\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.416946 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-fernet-keys\") pod \"551282ea-7408-404c-9128-cfa4f06089f3\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.417111 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5x7g\" (UniqueName: \"kubernetes.io/projected/551282ea-7408-404c-9128-cfa4f06089f3-kube-api-access-r5x7g\") pod \"551282ea-7408-404c-9128-cfa4f06089f3\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.417141 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-combined-ca-bundle\") pod \"551282ea-7408-404c-9128-cfa4f06089f3\" (UID: \"551282ea-7408-404c-9128-cfa4f06089f3\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.425033 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.426565 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "551282ea-7408-404c-9128-cfa4f06089f3" (UID: "551282ea-7408-404c-9128-cfa4f06089f3"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.430193 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ed853af8-ff05-4908-a82f-deceefe54dad" (UID: "ed853af8-ff05-4908-a82f-deceefe54dad"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.434620 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/551282ea-7408-404c-9128-cfa4f06089f3-kube-api-access-r5x7g" (OuterVolumeSpecName: "kube-api-access-r5x7g") pod "551282ea-7408-404c-9128-cfa4f06089f3" (UID: "551282ea-7408-404c-9128-cfa4f06089f3"). InnerVolumeSpecName "kube-api-access-r5x7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.441266 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed853af8-ff05-4908-a82f-deceefe54dad-kube-api-access-8c4pj" (OuterVolumeSpecName: "kube-api-access-8c4pj") pod "ed853af8-ff05-4908-a82f-deceefe54dad" (UID: "ed853af8-ff05-4908-a82f-deceefe54dad"). InnerVolumeSpecName "kube-api-access-8c4pj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.465770 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-scripts" (OuterVolumeSpecName: "scripts") pod "551282ea-7408-404c-9128-cfa4f06089f3" (UID: "551282ea-7408-404c-9128-cfa4f06089f3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.473339 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "551282ea-7408-404c-9128-cfa4f06089f3" (UID: "551282ea-7408-404c-9128-cfa4f06089f3"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.518633 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-sb\") pod \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.518798 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-nb\") pod \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.519195 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrlzt\" (UniqueName: \"kubernetes.io/projected/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-kube-api-access-lrlzt\") pod \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.519242 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-config\") pod \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.519363 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-dns-svc\") pod \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\" (UID: \"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7\") " Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.520415 4900 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.520446 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.520461 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8c4pj\" (UniqueName: \"kubernetes.io/projected/ed853af8-ff05-4908-a82f-deceefe54dad-kube-api-access-8c4pj\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.520476 4900 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.520487 4900 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.520497 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5x7g\" (UniqueName: \"kubernetes.io/projected/551282ea-7408-404c-9128-cfa4f06089f3-kube-api-access-r5x7g\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.521972 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed853af8-ff05-4908-a82f-deceefe54dad" (UID: "ed853af8-ff05-4908-a82f-deceefe54dad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.585362 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-kube-api-access-lrlzt" (OuterVolumeSpecName: "kube-api-access-lrlzt") pod "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" (UID: "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7"). InnerVolumeSpecName "kube-api-access-lrlzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.624889 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrlzt\" (UniqueName: \"kubernetes.io/projected/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-kube-api-access-lrlzt\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.624920 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed853af8-ff05-4908-a82f-deceefe54dad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.637177 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "551282ea-7408-404c-9128-cfa4f06089f3" (UID: "551282ea-7408-404c-9128-cfa4f06089f3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.661222 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-config-data" (OuterVolumeSpecName: "config-data") pod "551282ea-7408-404c-9128-cfa4f06089f3" (UID: "551282ea-7408-404c-9128-cfa4f06089f3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.704100 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.705758 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84976bdf-cgxdj" event={"ID":"5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7","Type":"ContainerDied","Data":"483bca978bf3feb121ae1c234de9916bcfde0d3cbf4324fd748626f5c775885f"} Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.705856 4900 scope.go:117] "RemoveContainer" containerID="3e2b8649ad7da075231edd580a45c063bd7b1e428382c6f4505ce930a213049b" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.727584 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.727618 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/551282ea-7408-404c-9128-cfa4f06089f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.745217 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-6hsbc" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.747637 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6hsbc" event={"ID":"551282ea-7408-404c-9128-cfa4f06089f3","Type":"ContainerDied","Data":"d71e80ea11fc30da8bb447bd2f46ae629cc687699923091027ee7430efb4696d"} Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.747665 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d71e80ea11fc30da8bb447bd2f46ae629cc687699923091027ee7430efb4696d" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.768432 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-74d8686b8d-zh4mj"] Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.774352 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" (UID: "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.775564 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dr6wx" event={"ID":"ed853af8-ff05-4908-a82f-deceefe54dad","Type":"ContainerDied","Data":"23fc0e28876ac33d0f10f484db5639018b61c9f0d3b12d67bf67c5c2d47b59a6"} Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.775601 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23fc0e28876ac33d0f10f484db5639018b61c9f0d3b12d67bf67c5c2d47b59a6" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.775646 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-dr6wx" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.778306 4900 scope.go:117] "RemoveContainer" containerID="560ed17ecfcf39ac8497c44933ccd5aecfba05ffea2010b93bf724e1200e1451" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.824688 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" (UID: "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.829464 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.829493 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.858988 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" (UID: "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.869230 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-config" (OuterVolumeSpecName: "config") pod "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" (UID: "5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.933741 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:36 crc kubenswrapper[4900]: I0127 12:51:36.934240 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.172133 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-cgxdj"] Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.188338 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-cgxdj"] Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.637927 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cdc8bdcbf-4ltg2"] Jan 27 12:51:37 crc kubenswrapper[4900]: E0127 12:51:37.643386 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed853af8-ff05-4908-a82f-deceefe54dad" containerName="barbican-db-sync" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.643414 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed853af8-ff05-4908-a82f-deceefe54dad" containerName="barbican-db-sync" Jan 27 12:51:37 crc kubenswrapper[4900]: E0127 12:51:37.643433 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="551282ea-7408-404c-9128-cfa4f06089f3" containerName="keystone-bootstrap" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.643442 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="551282ea-7408-404c-9128-cfa4f06089f3" containerName="keystone-bootstrap" Jan 27 12:51:37 crc kubenswrapper[4900]: E0127 12:51:37.643452 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerName="dnsmasq-dns" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.643460 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerName="dnsmasq-dns" Jan 27 12:51:37 crc kubenswrapper[4900]: E0127 12:51:37.643513 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerName="init" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.643521 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerName="init" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.643786 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="551282ea-7408-404c-9128-cfa4f06089f3" containerName="keystone-bootstrap" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.643802 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" containerName="dnsmasq-dns" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.643825 4900 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="ed853af8-ff05-4908-a82f-deceefe54dad" containerName="barbican-db-sync" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.660115 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.688622 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.689045 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.694932 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.694979 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.694934 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k2fbf" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.695316 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.708351 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cdc8bdcbf-4ltg2"] Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.752976 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-59b569d4f4-bkjxc"] Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.775427 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-public-tls-certs\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.775482 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-config-data\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.775530 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-internal-tls-certs\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.775593 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-fernet-keys\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.775624 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-credential-keys\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: 
\"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.775651 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xln8\" (UniqueName: \"kubernetes.io/projected/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-kube-api-access-4xln8\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.775675 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-combined-ca-bundle\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.775764 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-scripts\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.776710 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.783796 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-pqf6l" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.784070 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.788225 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.831426 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5657957b69-ncpk6"] Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.833467 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.839501 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.878218 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-nwqdh" event={"ID":"b5375696-4614-47d4-a8aa-2a98bdd0bd17","Type":"ContainerStarted","Data":"a13aabe46ae25a7b0d20ffcbc0d5601f89d5f2e811229b05c594e23f610f2c81"} Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.878240 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-combined-ca-bundle\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.878308 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-combined-ca-bundle\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.878379 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82qvb\" (UniqueName: \"kubernetes.io/projected/a644fee2-5031-470c-a287-34a8963d86c7-kube-api-access-82qvb\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.878433 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-scripts\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879350 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data-custom\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879412 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c216bce-8265-4cdb-8104-6267c1196cc2-logs\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879448 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 
12:51:37.879472 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-public-tls-certs\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879499 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-config-data\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879537 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data-custom\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879569 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a644fee2-5031-470c-a287-34a8963d86c7-logs\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879587 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-internal-tls-certs\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879630 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjj5f\" (UniqueName: \"kubernetes.io/projected/4c216bce-8265-4cdb-8104-6267c1196cc2-kube-api-access-wjj5f\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879685 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879706 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-fernet-keys\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879742 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-credential-keys\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc 
kubenswrapper[4900]: I0127 12:51:37.879777 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xln8\" (UniqueName: \"kubernetes.io/projected/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-kube-api-access-4xln8\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.879811 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-combined-ca-bundle\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.912422 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-public-tls-certs\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.920325 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-scripts\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.920815 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65d58d75c7-pn4zc" event={"ID":"58051d5f-ca34-40db-9b7c-da0d095fa129","Type":"ContainerStarted","Data":"546ad62922eae7f52754cea6d7041fce4a43b8e635f8d55636f58925ee390b9f"} Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.922017 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-credential-keys\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.922123 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.924340 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-config-data\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.924394 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-74d8686b8d-zh4mj" event={"ID":"eec75b81-8153-4685-a1bc-826a5abed42b","Type":"ContainerStarted","Data":"9fecdc03187e899cfaece285cd1e39d8ff741fa2a0032d6c32dc68117d27db52"} Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.924437 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-74d8686b8d-zh4mj" event={"ID":"eec75b81-8153-4685-a1bc-826a5abed42b","Type":"ContainerStarted","Data":"e9d20eb43b716a4b3ea111dbd826d2c00ec9f87d30b4488455ebe8f9f15408a9"} Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.930793 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-fernet-keys\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.950379 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2rf6d" event={"ID":"f3331ea7-d796-459a-9e9d-6f744ba8822b","Type":"ContainerStarted","Data":"217dbd43523938257b0b4a51576f4dcb1e6b4682f707f56b660a481b1afa3131"} Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.953666 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-internal-tls-certs\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.957559 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xln8\" (UniqueName: \"kubernetes.io/projected/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-kube-api-access-4xln8\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:37 crc kubenswrapper[4900]: I0127 12:51:37.994082 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-59b569d4f4-bkjxc"] Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999165 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-combined-ca-bundle\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999228 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-combined-ca-bundle\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999288 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82qvb\" (UniqueName: \"kubernetes.io/projected/a644fee2-5031-470c-a287-34a8963d86c7-kube-api-access-82qvb\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999348 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data-custom\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999396 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c216bce-8265-4cdb-8104-6267c1196cc2-logs\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: 
I0127 12:51:37.999421 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999454 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data-custom\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999479 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a644fee2-5031-470c-a287-34a8963d86c7-logs\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999520 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjj5f\" (UniqueName: \"kubernetes.io/projected/4c216bce-8265-4cdb-8104-6267c1196cc2-kube-api-access-wjj5f\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:37.999595 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.000213 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cef6642f-e1e2-460a-9d5d-6d1c797cf79a-combined-ca-bundle\") pod \"keystone-cdc8bdcbf-4ltg2\" (UID: \"cef6642f-e1e2-460a-9d5d-6d1c797cf79a\") " pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.018852 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c216bce-8265-4cdb-8104-6267c1196cc2-logs\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.019223 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a644fee2-5031-470c-a287-34a8963d86c7-logs\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.024865 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed","Type":"ContainerStarted","Data":"ce2accb87d865554efa37cb00ff0fd8f6420bee61fc34aa92160a5cbf58d16a2"} Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.028823 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data-custom\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.030566 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.034136 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.034608 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-combined-ca-bundle\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.039544 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-combined-ca-bundle\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.041864 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data-custom\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.063021 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.090822 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82qvb\" (UniqueName: \"kubernetes.io/projected/a644fee2-5031-470c-a287-34a8963d86c7-kube-api-access-82qvb\") pod \"barbican-keystone-listener-59b569d4f4-bkjxc\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.091363 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjj5f\" (UniqueName: \"kubernetes.io/projected/4c216bce-8265-4cdb-8104-6267c1196cc2-kube-api-access-wjj5f\") pod \"barbican-worker-5657957b69-ncpk6\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.178555 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.214973 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5657957b69-ncpk6"] Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.246904 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.369123 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d649d8c65-gkl5t"] Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.372075 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.389833 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-65d58d75c7-pn4zc" podStartSLOduration=12.38980671 podStartE2EDuration="12.38980671s" podCreationTimestamp="2026-01-27 12:51:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:38.070249295 +0000 UTC m=+1525.307277505" watchObservedRunningTime="2026-01-27 12:51:38.38980671 +0000 UTC m=+1525.626834920" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.416649 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5nw6\" (UniqueName: \"kubernetes.io/projected/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-kube-api-access-q5nw6\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.416695 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-config\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.416729 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-dns-svc\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.416871 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-sb\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.416996 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-nb\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.440839 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d649d8c65-gkl5t"] Jan 27 12:51:38 
crc kubenswrapper[4900]: I0127 12:51:38.447855 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-nwqdh" podStartSLOduration=4.695434512 podStartE2EDuration="55.447824402s" podCreationTimestamp="2026-01-27 12:50:43 +0000 UTC" firstStartedPulling="2026-01-27 12:50:45.424235663 +0000 UTC m=+1472.661263873" lastFinishedPulling="2026-01-27 12:51:36.176625563 +0000 UTC m=+1523.413653763" observedRunningTime="2026-01-27 12:51:38.127994168 +0000 UTC m=+1525.365022378" watchObservedRunningTime="2026-01-27 12:51:38.447824402 +0000 UTC m=+1525.684852612" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.510824 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-2rf6d" podStartSLOduration=7.328784919 podStartE2EDuration="55.510796598s" podCreationTimestamp="2026-01-27 12:50:43 +0000 UTC" firstStartedPulling="2026-01-27 12:50:47.99518407 +0000 UTC m=+1475.232212280" lastFinishedPulling="2026-01-27 12:51:36.177195739 +0000 UTC m=+1523.414223959" observedRunningTime="2026-01-27 12:51:38.176611976 +0000 UTC m=+1525.413640186" watchObservedRunningTime="2026-01-27 12:51:38.510796598 +0000 UTC m=+1525.747824808" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.524508 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-sb\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.524713 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-nb\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.524803 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5nw6\" (UniqueName: \"kubernetes.io/projected/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-kube-api-access-q5nw6\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.524829 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-config\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.524872 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-dns-svc\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.536239 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-sb\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc 
kubenswrapper[4900]: I0127 12:51:38.537287 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-config\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.538608 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-dns-svc\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.606743 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-nb\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.640042 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7" path="/var/lib/kubelet/pods/5be1ee3b-28f0-4dd0-83cb-c0f7ce2093a7/volumes" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.641850 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5nw6\" (UniqueName: \"kubernetes.io/projected/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-kube-api-access-q5nw6\") pod \"dnsmasq-dns-7d649d8c65-gkl5t\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.642670 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-85c4fff8fd-7tgsz"] Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.662197 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.667890 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-85c4fff8fd-7tgsz"] Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.688426 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.720548 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.720825 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-869c8dd855-nf22j"] Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.738409 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.741075 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-699cfff846-mfrc2"] Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.772228 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.807672 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data-custom\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.807874 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-logs\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.808435 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.808577 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j955b\" (UniqueName: \"kubernetes.io/projected/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-kube-api-access-j955b\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.808773 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-combined-ca-bundle\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.934465 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.934832 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf7ml\" (UniqueName: \"kubernetes.io/projected/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-kube-api-access-rf7ml\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.934859 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-logs\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.934959 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j955b\" (UniqueName: 
\"kubernetes.io/projected/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-kube-api-access-j955b\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.935008 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-config-data-custom\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.935114 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-logs\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.935187 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-combined-ca-bundle\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.935320 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data-custom\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.935374 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-config-data\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.935397 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-config-data\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.935469 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-logs\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.935507 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-config-data-custom\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 
12:51:38.939964 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-869c8dd855-nf22j"] Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.943379 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-combined-ca-bundle\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.943457 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-combined-ca-bundle\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.946196 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbchn\" (UniqueName: \"kubernetes.io/projected/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-kube-api-access-xbchn\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.977772 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data-custom\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.987101 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-logs\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:38 crc kubenswrapper[4900]: I0127 12:51:38.992428 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-combined-ca-bundle\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:38.993038 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.019073 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-699cfff846-mfrc2"] Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.044200 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j955b\" (UniqueName: \"kubernetes.io/projected/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-kube-api-access-j955b\") pod \"barbican-api-85c4fff8fd-7tgsz\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 
12:51:39.051490 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf7ml\" (UniqueName: \"kubernetes.io/projected/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-kube-api-access-rf7ml\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.051554 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-logs\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.051646 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-config-data-custom\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.051712 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-logs\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.051892 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-config-data\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.051919 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-config-data\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.051972 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-config-data-custom\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.052043 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-combined-ca-bundle\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.052134 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-combined-ca-bundle\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " 
pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.052253 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbchn\" (UniqueName: \"kubernetes.io/projected/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-kube-api-access-xbchn\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.054678 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-logs\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.056721 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-74d8686b8d-zh4mj" event={"ID":"eec75b81-8153-4685-a1bc-826a5abed42b","Type":"ContainerStarted","Data":"cc3d4bff0e2dd5d2a1fd6776fce0d447d0927113092e33e5d3678be35abfc8c6"} Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.056833 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.056873 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.056961 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-logs\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.068190 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-config-data\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.079792 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-combined-ca-bundle\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.081189 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-config-data-custom\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.081939 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rf7ml\" (UniqueName: \"kubernetes.io/projected/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-kube-api-access-rf7ml\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: 
I0127 12:51:39.082347 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-config-data\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.082887 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c-config-data-custom\") pod \"barbican-keystone-listener-699cfff846-mfrc2\" (UID: \"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c\") " pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.083302 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-combined-ca-bundle\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.102956 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbchn\" (UniqueName: \"kubernetes.io/projected/e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb-kube-api-access-xbchn\") pod \"barbican-worker-869c8dd855-nf22j\" (UID: \"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb\") " pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.112020 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-869c8dd855-nf22j" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.129367 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-69877cf4b6-8ncvq"] Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.132103 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.157971 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.161041 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-69877cf4b6-8ncvq"] Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.262942 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-combined-ca-bundle\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.263091 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-logs\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.263274 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.263306 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pk4s2\" (UniqueName: \"kubernetes.io/projected/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-kube-api-access-pk4s2\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.263341 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data-custom\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.320826 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.362977 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cdc8bdcbf-4ltg2"] Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.372258 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.372362 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pk4s2\" (UniqueName: \"kubernetes.io/projected/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-kube-api-access-pk4s2\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.372445 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data-custom\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.372496 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-combined-ca-bundle\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.372682 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-logs\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.374593 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-logs\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.388979 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data-custom\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.395873 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-combined-ca-bundle\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.411797 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pk4s2\" (UniqueName: 
\"kubernetes.io/projected/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-kube-api-access-pk4s2\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.413020 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data\") pod \"barbican-api-69877cf4b6-8ncvq\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.417509 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-74d8686b8d-zh4mj" podStartSLOduration=7.417481968 podStartE2EDuration="7.417481968s" podCreationTimestamp="2026-01-27 12:51:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:39.09196478 +0000 UTC m=+1526.328992990" watchObservedRunningTime="2026-01-27 12:51:39.417481968 +0000 UTC m=+1526.654510168" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.433922 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.888903 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-59b569d4f4-bkjxc"] Jan 27 12:51:39 crc kubenswrapper[4900]: I0127 12:51:39.949460 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d649d8c65-gkl5t"] Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:39.991467 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5657957b69-ncpk6"] Jan 27 12:51:40 crc kubenswrapper[4900]: W0127 12:51:40.011857 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c216bce_8265_4cdb_8104_6267c1196cc2.slice/crio-093fa79bcd6f44b8a67cd3113d871d429307d75c8c97c9b12837fdbae88949c3 WatchSource:0}: Error finding container 093fa79bcd6f44b8a67cd3113d871d429307d75c8c97c9b12837fdbae88949c3: Status 404 returned error can't find the container with id 093fa79bcd6f44b8a67cd3113d871d429307d75c8c97c9b12837fdbae88949c3 Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.125604 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" event={"ID":"a644fee2-5031-470c-a287-34a8963d86c7","Type":"ContainerStarted","Data":"2334fb0a8a93290269c8f8d4127e11b7a23b041e00afc292d6a4ef5b379574f6"} Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.152030 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5657957b69-ncpk6" event={"ID":"4c216bce-8265-4cdb-8104-6267c1196cc2","Type":"ContainerStarted","Data":"093fa79bcd6f44b8a67cd3113d871d429307d75c8c97c9b12837fdbae88949c3"} Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.169142 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cdc8bdcbf-4ltg2" event={"ID":"cef6642f-e1e2-460a-9d5d-6d1c797cf79a","Type":"ContainerStarted","Data":"65eae6a6f74e991e1da69d8b94342a114d5c90b57b2d20d35fc5e2a9701d13f5"} Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.169225 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cdc8bdcbf-4ltg2" 
event={"ID":"cef6642f-e1e2-460a-9d5d-6d1c797cf79a","Type":"ContainerStarted","Data":"4a4dbf7e22ece2785299d2943ccc6c928187f61042bac0d71547ddb261c5f650"} Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.170156 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.178804 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" event={"ID":"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4","Type":"ContainerStarted","Data":"e06ba6fa624b74f9c153b9430b19c596d739250fd10210ffd6338bb35c56a9f5"} Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.223638 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cdc8bdcbf-4ltg2" podStartSLOduration=3.223615348 podStartE2EDuration="3.223615348s" podCreationTimestamp="2026-01-27 12:51:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:40.196157488 +0000 UTC m=+1527.433185698" watchObservedRunningTime="2026-01-27 12:51:40.223615348 +0000 UTC m=+1527.460643558" Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.260439 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-869c8dd855-nf22j"] Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.276292 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-699cfff846-mfrc2"] Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.635764 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-85c4fff8fd-7tgsz"] Jan 27 12:51:40 crc kubenswrapper[4900]: I0127 12:51:40.647447 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-69877cf4b6-8ncvq"] Jan 27 12:51:41 crc kubenswrapper[4900]: I0127 12:51:41.208632 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c4fff8fd-7tgsz" event={"ID":"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2","Type":"ContainerStarted","Data":"4719a19c48aae149ade5d7793c218bb246da50101d4c8add508ae19c3e7ec9c3"} Jan 27 12:51:41 crc kubenswrapper[4900]: I0127 12:51:41.208997 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c4fff8fd-7tgsz" event={"ID":"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2","Type":"ContainerStarted","Data":"5beffe6f22919ef74c77d1d94da786d8b46e601bc66b93fff77410f1e51c3271"} Jan 27 12:51:41 crc kubenswrapper[4900]: I0127 12:51:41.212362 4900 generic.go:334] "Generic (PLEG): container finished" podID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerID="97c59a9978fa25a5c6166781a9958b79a4044783115272e29f9a4d4705040702" exitCode=0 Jan 27 12:51:41 crc kubenswrapper[4900]: I0127 12:51:41.212613 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" event={"ID":"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4","Type":"ContainerDied","Data":"97c59a9978fa25a5c6166781a9958b79a4044783115272e29f9a4d4705040702"} Jan 27 12:51:41 crc kubenswrapper[4900]: I0127 12:51:41.218808 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-869c8dd855-nf22j" event={"ID":"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb","Type":"ContainerStarted","Data":"95bb27bcf319ccca27c88aea0d366488b2f1d128ff6fc0bad28e4a2fff694716"} Jan 27 12:51:41 crc kubenswrapper[4900]: I0127 12:51:41.226473 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-69877cf4b6-8ncvq" event={"ID":"f6e2aa6d-6318-4253-9af3-90f151d7dbc3","Type":"ContainerStarted","Data":"db20b1da3145bc96ddf91689bf6a195f288a8e67287e52fa5f6b59a54cc3e2d3"} Jan 27 12:51:41 crc kubenswrapper[4900]: I0127 12:51:41.245817 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" event={"ID":"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c","Type":"ContainerStarted","Data":"652ce4d34175b76615e596847a2d7a6868506332140333a2bcb91c19408a176a"} Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.098919 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-69877cf4b6-8ncvq"] Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.123850 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-c6f4bcb48-jgthx"] Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.126798 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.135415 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.135657 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.174359 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-c6f4bcb48-jgthx"] Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.178396 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-combined-ca-bundle\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.178468 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2lfx\" (UniqueName: \"kubernetes.io/projected/2550873b-f8d1-4bfe-8155-64f7d0929058-kube-api-access-v2lfx\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.178671 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-config-data-custom\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.178731 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-config-data\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.179027 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-internal-tls-certs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") 
" pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.179102 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-public-tls-certs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.179280 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2550873b-f8d1-4bfe-8155-64f7d0929058-logs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.274686 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" event={"ID":"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4","Type":"ContainerStarted","Data":"af53cdca6be00d361acb6abf0b5978ddd065452652f23db0b4ad40494c33394b"} Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.277201 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.282324 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-config-data-custom\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.282384 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-config-data\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.282517 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-internal-tls-certs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.282540 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-public-tls-certs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.282606 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2550873b-f8d1-4bfe-8155-64f7d0929058-logs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.282731 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-combined-ca-bundle\") pod 
\"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.282770 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2lfx\" (UniqueName: \"kubernetes.io/projected/2550873b-f8d1-4bfe-8155-64f7d0929058-kube-api-access-v2lfx\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.284777 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2550873b-f8d1-4bfe-8155-64f7d0929058-logs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.289576 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69877cf4b6-8ncvq" event={"ID":"f6e2aa6d-6318-4253-9af3-90f151d7dbc3","Type":"ContainerStarted","Data":"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b"} Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.293116 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-combined-ca-bundle\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.295656 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-config-data\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.296548 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-config-data-custom\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.297726 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-public-tls-certs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.298192 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c4fff8fd-7tgsz" event={"ID":"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2","Type":"ContainerStarted","Data":"01b7dc8bda1b099923b8339857bc48311738c75e7b4052871f1e1f9cbd624a83"} Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.298274 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.298407 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.298444 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2550873b-f8d1-4bfe-8155-64f7d0929058-internal-tls-certs\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.304310 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2lfx\" (UniqueName: \"kubernetes.io/projected/2550873b-f8d1-4bfe-8155-64f7d0929058-kube-api-access-v2lfx\") pod \"barbican-api-c6f4bcb48-jgthx\" (UID: \"2550873b-f8d1-4bfe-8155-64f7d0929058\") " pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.331900 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" podStartSLOduration=4.331867477 podStartE2EDuration="4.331867477s" podCreationTimestamp="2026-01-27 12:51:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:42.312229215 +0000 UTC m=+1529.549257415" watchObservedRunningTime="2026-01-27 12:51:42.331867477 +0000 UTC m=+1529.568895687" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.354632 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-85c4fff8fd-7tgsz" podStartSLOduration=4.35459976 podStartE2EDuration="4.35459976s" podCreationTimestamp="2026-01-27 12:51:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:42.338763928 +0000 UTC m=+1529.575792138" watchObservedRunningTime="2026-01-27 12:51:42.35459976 +0000 UTC m=+1529.591627970" Jan 27 12:51:42 crc kubenswrapper[4900]: I0127 12:51:42.467706 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:43 crc kubenswrapper[4900]: I0127 12:51:43.323923 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-69877cf4b6-8ncvq" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerName="barbican-api-log" containerID="cri-o://f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b" gracePeriod=30 Jan 27 12:51:43 crc kubenswrapper[4900]: I0127 12:51:43.324685 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-69877cf4b6-8ncvq" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerName="barbican-api" containerID="cri-o://74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3" gracePeriod=30 Jan 27 12:51:43 crc kubenswrapper[4900]: I0127 12:51:43.324231 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69877cf4b6-8ncvq" event={"ID":"f6e2aa6d-6318-4253-9af3-90f151d7dbc3","Type":"ContainerStarted","Data":"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3"} Jan 27 12:51:43 crc kubenswrapper[4900]: I0127 12:51:43.325197 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:43 crc kubenswrapper[4900]: I0127 12:51:43.325215 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:43 crc kubenswrapper[4900]: I0127 12:51:43.364839 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-69877cf4b6-8ncvq" podStartSLOduration=5.364808588 podStartE2EDuration="5.364808588s" podCreationTimestamp="2026-01-27 12:51:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:43.346395961 +0000 UTC m=+1530.583424181" watchObservedRunningTime="2026-01-27 12:51:43.364808588 +0000 UTC m=+1530.601836808" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.198925 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-c6f4bcb48-jgthx"] Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.353900 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.373287 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"52d4bbbff6d0b60a8d984b4e3e9a2a71729fca7f42b92fbb3430f8589b9e7080"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.410847 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6f4bcb48-jgthx" event={"ID":"2550873b-f8d1-4bfe-8155-64f7d0929058","Type":"ContainerStarted","Data":"3b6aff1c024c1b2ab8eaafcc8908e9b47b7aace53dee7bfb79bd16426f4b3474"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.423234 4900 generic.go:334] "Generic (PLEG): container finished" podID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerID="74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3" exitCode=0 Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.423279 4900 generic.go:334] "Generic (PLEG): container finished" podID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerID="f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b" exitCode=143 Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.423337 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69877cf4b6-8ncvq" event={"ID":"f6e2aa6d-6318-4253-9af3-90f151d7dbc3","Type":"ContainerDied","Data":"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.423375 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69877cf4b6-8ncvq" event={"ID":"f6e2aa6d-6318-4253-9af3-90f151d7dbc3","Type":"ContainerDied","Data":"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.423388 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69877cf4b6-8ncvq" event={"ID":"f6e2aa6d-6318-4253-9af3-90f151d7dbc3","Type":"ContainerDied","Data":"db20b1da3145bc96ddf91689bf6a195f288a8e67287e52fa5f6b59a54cc3e2d3"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.423409 4900 scope.go:117] "RemoveContainer" containerID="74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.423601 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-69877cf4b6-8ncvq" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.455919 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" event={"ID":"a644fee2-5031-470c-a287-34a8963d86c7","Type":"ContainerStarted","Data":"07095d5ca62ecb4330fb45031bd07ea2698b438ba2ee91ee52daf2922b4eff50"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.463041 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" event={"ID":"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c","Type":"ContainerStarted","Data":"30500d30bf415e3eb5587fe1daecf3e021e76dbb1f4c57225bb82798bb2402e7"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.478162 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-869c8dd855-nf22j" event={"ID":"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb","Type":"ContainerStarted","Data":"bcaa245ed760865f213820c12968e17f3c79532722dbf026726368fb178c07c6"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.501143 4900 scope.go:117] "RemoveContainer" containerID="f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.511474 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data-custom\") pod \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.513505 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pk4s2\" (UniqueName: \"kubernetes.io/projected/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-kube-api-access-pk4s2\") pod \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.513548 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-logs\") pod \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.513671 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-combined-ca-bundle\") pod \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.514030 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-logs" (OuterVolumeSpecName: "logs") pod "f6e2aa6d-6318-4253-9af3-90f151d7dbc3" (UID: "f6e2aa6d-6318-4253-9af3-90f151d7dbc3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.515635 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data\") pod \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\" (UID: \"f6e2aa6d-6318-4253-9af3-90f151d7dbc3\") " Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.516587 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.522788 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f6e2aa6d-6318-4253-9af3-90f151d7dbc3" (UID: "f6e2aa6d-6318-4253-9af3-90f151d7dbc3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.530622 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-kube-api-access-pk4s2" (OuterVolumeSpecName: "kube-api-access-pk4s2") pod "f6e2aa6d-6318-4253-9af3-90f151d7dbc3" (UID: "f6e2aa6d-6318-4253-9af3-90f151d7dbc3"). InnerVolumeSpecName "kube-api-access-pk4s2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.603660 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6e2aa6d-6318-4253-9af3-90f151d7dbc3" (UID: "f6e2aa6d-6318-4253-9af3-90f151d7dbc3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.621047 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.621107 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pk4s2\" (UniqueName: \"kubernetes.io/projected/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-kube-api-access-pk4s2\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.621125 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.649675 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data" (OuterVolumeSpecName: "config-data") pod "f6e2aa6d-6318-4253-9af3-90f151d7dbc3" (UID: "f6e2aa6d-6318-4253-9af3-90f151d7dbc3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.726449 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6e2aa6d-6318-4253-9af3-90f151d7dbc3-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.895105 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5657957b69-ncpk6" event={"ID":"4c216bce-8265-4cdb-8104-6267c1196cc2","Type":"ContainerStarted","Data":"8c43da896c9f4193b0949bf2d40b57eec81f0be251163c55686a409d54e75844"} Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.938357 4900 scope.go:117] "RemoveContainer" containerID="74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3" Jan 27 12:51:44 crc kubenswrapper[4900]: E0127 12:51:44.943456 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3\": container with ID starting with 74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3 not found: ID does not exist" containerID="74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.943517 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3"} err="failed to get container status \"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3\": rpc error: code = NotFound desc = could not find container \"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3\": container with ID starting with 74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3 not found: ID does not exist" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.943556 4900 scope.go:117] "RemoveContainer" containerID="f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b" Jan 27 12:51:44 crc kubenswrapper[4900]: E0127 12:51:44.944316 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b\": container with ID starting with f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b not found: ID does not exist" containerID="f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.944346 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b"} err="failed to get container status \"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b\": rpc error: code = NotFound desc = could not find container \"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b\": container with ID starting with f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b not found: ID does not exist" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.944369 4900 scope.go:117] "RemoveContainer" containerID="74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.945265 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3"} err="failed to get container status 
\"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3\": rpc error: code = NotFound desc = could not find container \"74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3\": container with ID starting with 74820ac944a5df1862ff2e21713fc71b2345fa81baa222a90be455247fbfffd3 not found: ID does not exist" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.945293 4900 scope.go:117] "RemoveContainer" containerID="f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.946539 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b"} err="failed to get container status \"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b\": rpc error: code = NotFound desc = could not find container \"f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b\": container with ID starting with f323dbe23b49b97b30417cd53285943fd86bb69dc1ee0fbf67b2a2b2e0692d3b not found: ID does not exist" Jan 27 12:51:44 crc kubenswrapper[4900]: I0127 12:51:44.989548 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-69877cf4b6-8ncvq"] Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.003362 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-69877cf4b6-8ncvq"] Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.537214 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" event={"ID":"a644fee2-5031-470c-a287-34a8963d86c7","Type":"ContainerStarted","Data":"5a947b4b0098e3c9a2e3a190832f11ce33e05671f72352a8f7046c772ac88153"} Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.566818 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" event={"ID":"d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c","Type":"ContainerStarted","Data":"149d7c1f408207193901a826c6c9a96aa38a06c26b87f63d800fbadabb93fa5f"} Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.585584 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-869c8dd855-nf22j" event={"ID":"e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb","Type":"ContainerStarted","Data":"b6a0f5a675fe8043761e13573238a87eefb7b8199dc212096c2286f3edd06faf"} Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.586042 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" podStartSLOduration=4.943415712 podStartE2EDuration="8.586003619s" podCreationTimestamp="2026-01-27 12:51:37 +0000 UTC" firstStartedPulling="2026-01-27 12:51:39.958389946 +0000 UTC m=+1527.195418156" lastFinishedPulling="2026-01-27 12:51:43.600977853 +0000 UTC m=+1530.838006063" observedRunningTime="2026-01-27 12:51:45.564696148 +0000 UTC m=+1532.801724358" watchObservedRunningTime="2026-01-27 12:51:45.586003619 +0000 UTC m=+1532.823031829" Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.604363 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5657957b69-ncpk6" event={"ID":"4c216bce-8265-4cdb-8104-6267c1196cc2","Type":"ContainerStarted","Data":"1e5fde54d05e0651d0427012836c57a018b958468e6b63d0de2ee6027e36a9d6"} Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.612708 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/barbican-keystone-listener-699cfff846-mfrc2" podStartSLOduration=4.325309815 podStartE2EDuration="7.612682327s" podCreationTimestamp="2026-01-27 12:51:38 +0000 UTC" firstStartedPulling="2026-01-27 12:51:40.277928031 +0000 UTC m=+1527.514956241" lastFinishedPulling="2026-01-27 12:51:43.565300543 +0000 UTC m=+1530.802328753" observedRunningTime="2026-01-27 12:51:45.592787507 +0000 UTC m=+1532.829815707" watchObservedRunningTime="2026-01-27 12:51:45.612682327 +0000 UTC m=+1532.849710537" Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.634440 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-869c8dd855-nf22j" podStartSLOduration=4.2679121030000005 podStartE2EDuration="7.634410041s" podCreationTimestamp="2026-01-27 12:51:38 +0000 UTC" firstStartedPulling="2026-01-27 12:51:40.253159239 +0000 UTC m=+1527.490187449" lastFinishedPulling="2026-01-27 12:51:43.619657177 +0000 UTC m=+1530.856685387" observedRunningTime="2026-01-27 12:51:45.632595678 +0000 UTC m=+1532.869623888" watchObservedRunningTime="2026-01-27 12:51:45.634410041 +0000 UTC m=+1532.871438251" Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.662900 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"aac0ff2791b9b9081d884b1769e900624b3c5bdf0a155ba10e05c3d998cfd8cd"} Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.685109 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6f4bcb48-jgthx" event={"ID":"2550873b-f8d1-4bfe-8155-64f7d0929058","Type":"ContainerStarted","Data":"9ca9802a8b2999819e1fd278b8560df796284ab591739ed678178c5ac7e51693"} Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.685502 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-c6f4bcb48-jgthx" event={"ID":"2550873b-f8d1-4bfe-8155-64f7d0929058","Type":"ContainerStarted","Data":"0a70fa9466b173de4bb8cc63e943bc3883d71fe0d7203126e48a57b7c0c021ed"} Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.685519 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.685533 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.705026 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-59b569d4f4-bkjxc"] Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.754202 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-5657957b69-ncpk6"] Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.781148 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5657957b69-ncpk6" podStartSLOduration=5.217228795 podStartE2EDuration="8.781104267s" podCreationTimestamp="2026-01-27 12:51:37 +0000 UTC" firstStartedPulling="2026-01-27 12:51:40.025310837 +0000 UTC m=+1527.262339047" lastFinishedPulling="2026-01-27 12:51:43.589186309 +0000 UTC m=+1530.826214519" observedRunningTime="2026-01-27 12:51:45.689586849 +0000 UTC m=+1532.926615059" watchObservedRunningTime="2026-01-27 12:51:45.781104267 +0000 UTC m=+1533.018132477" Jan 27 12:51:45 crc kubenswrapper[4900]: I0127 12:51:45.804078 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/barbican-api-c6f4bcb48-jgthx" podStartSLOduration=3.804036996 podStartE2EDuration="3.804036996s" podCreationTimestamp="2026-01-27 12:51:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:45.71671126 +0000 UTC m=+1532.953739470" watchObservedRunningTime="2026-01-27 12:51:45.804036996 +0000 UTC m=+1533.041065206" Jan 27 12:51:46 crc kubenswrapper[4900]: I0127 12:51:46.533241 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" path="/var/lib/kubelet/pods/f6e2aa6d-6318-4253-9af3-90f151d7dbc3/volumes" Jan 27 12:51:46 crc kubenswrapper[4900]: I0127 12:51:46.760767 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"8e17271618efebe06c5271024f8716c9b265fcc972a14de24da0531c3642698f"} Jan 27 12:51:46 crc kubenswrapper[4900]: I0127 12:51:46.760855 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"3cf16bd0682faa9c075564a629c1308c2cef61024c0d6775b346bb5fcd365647"} Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.059123 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lkpt6"] Jan 27 12:51:47 crc kubenswrapper[4900]: E0127 12:51:47.060183 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerName="barbican-api-log" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.060218 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerName="barbican-api-log" Jan 27 12:51:47 crc kubenswrapper[4900]: E0127 12:51:47.060263 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerName="barbican-api" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.060273 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerName="barbican-api" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.060626 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerName="barbican-api-log" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.060668 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6e2aa6d-6318-4253-9af3-90f151d7dbc3" containerName="barbican-api" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.063513 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.111200 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkpt6"] Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.122746 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-catalog-content\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.122869 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-utilities\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.122910 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5p59\" (UniqueName: \"kubernetes.io/projected/a68b9744-48fb-4c32-ac22-7506057a0496-kube-api-access-q5p59\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.226021 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-catalog-content\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.226185 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-utilities\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.226229 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5p59\" (UniqueName: \"kubernetes.io/projected/a68b9744-48fb-4c32-ac22-7506057a0496-kube-api-access-q5p59\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.227586 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-catalog-content\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.228155 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-utilities\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.260116 4900 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-q5p59\" (UniqueName: \"kubernetes.io/projected/a68b9744-48fb-4c32-ac22-7506057a0496-kube-api-access-q5p59\") pod \"redhat-marketplace-lkpt6\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.395874 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.801436 4900 generic.go:334] "Generic (PLEG): container finished" podID="b5375696-4614-47d4-a8aa-2a98bdd0bd17" containerID="a13aabe46ae25a7b0d20ffcbc0d5601f89d5f2e811229b05c594e23f610f2c81" exitCode=0 Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.801503 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-nwqdh" event={"ID":"b5375696-4614-47d4-a8aa-2a98bdd0bd17","Type":"ContainerDied","Data":"a13aabe46ae25a7b0d20ffcbc0d5601f89d5f2e811229b05c594e23f610f2c81"} Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.832239 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"f01c75d0ba5c4aeed0aef8c8d5bed2033a9f19fcc6b6de634fbdd8f108134a95"} Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.832299 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"0c2f90a4-baa0-4eeb-a797-3664c306818b","Type":"ContainerStarted","Data":"809c7d0d054a9c51d4d4497d1582784215fe9d5ad6b2e155910bcf66c64404b0"} Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.832422 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" podUID="a644fee2-5031-470c-a287-34a8963d86c7" containerName="barbican-keystone-listener-log" containerID="cri-o://07095d5ca62ecb4330fb45031bd07ea2698b438ba2ee91ee52daf2922b4eff50" gracePeriod=30 Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.832521 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" podUID="a644fee2-5031-470c-a287-34a8963d86c7" containerName="barbican-keystone-listener" containerID="cri-o://5a947b4b0098e3c9a2e3a190832f11ce33e05671f72352a8f7046c772ac88153" gracePeriod=30 Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.832913 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-5657957b69-ncpk6" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerName="barbican-worker-log" containerID="cri-o://8c43da896c9f4193b0949bf2d40b57eec81f0be251163c55686a409d54e75844" gracePeriod=30 Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.832983 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-5657957b69-ncpk6" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerName="barbican-worker" containerID="cri-o://1e5fde54d05e0651d0427012836c57a018b958468e6b63d0de2ee6027e36a9d6" gracePeriod=30 Jan 27 12:51:47 crc kubenswrapper[4900]: I0127 12:51:47.886191 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=71.368597282 podStartE2EDuration="2m12.886158662s" podCreationTimestamp="2026-01-27 12:49:35 +0000 UTC" firstStartedPulling="2026-01-27 12:50:42.070861487 +0000 UTC m=+1469.307889697" 
lastFinishedPulling="2026-01-27 12:51:43.588422857 +0000 UTC m=+1530.825451077" observedRunningTime="2026-01-27 12:51:47.878448897 +0000 UTC m=+1535.115477107" watchObservedRunningTime="2026-01-27 12:51:47.886158662 +0000 UTC m=+1535.123186872" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.229891 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d649d8c65-gkl5t"] Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.230176 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerName="dnsmasq-dns" containerID="cri-o://af53cdca6be00d361acb6abf0b5978ddd065452652f23db0b4ad40494c33394b" gracePeriod=10 Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.233474 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.282407 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-hr4zk"] Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.300459 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.304753 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.362989 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.363047 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.363197 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-config\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.368701 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.368995 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.369137 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssx9p\" (UniqueName: \"kubernetes.io/projected/0f50a23b-28bf-4815-beff-ed18fc15d800-kube-api-access-ssx9p\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.449397 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-hr4zk"] Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.471900 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.471980 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssx9p\" (UniqueName: \"kubernetes.io/projected/0f50a23b-28bf-4815-beff-ed18fc15d800-kube-api-access-ssx9p\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.472080 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.472103 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.472154 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-config\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.472223 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.473269 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.473389 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: 
\"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.473659 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.473937 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.474015 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-config\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.545577 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssx9p\" (UniqueName: \"kubernetes.io/projected/0f50a23b-28bf-4815-beff-ed18fc15d800-kube-api-access-ssx9p\") pod \"dnsmasq-dns-75c8ddd69c-hr4zk\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.663953 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.722030 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.198:5353: connect: connection refused" Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.857477 4900 generic.go:334] "Generic (PLEG): container finished" podID="a644fee2-5031-470c-a287-34a8963d86c7" containerID="07095d5ca62ecb4330fb45031bd07ea2698b438ba2ee91ee52daf2922b4eff50" exitCode=143 Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.857599 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" event={"ID":"a644fee2-5031-470c-a287-34a8963d86c7","Type":"ContainerDied","Data":"07095d5ca62ecb4330fb45031bd07ea2698b438ba2ee91ee52daf2922b4eff50"} Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.861205 4900 generic.go:334] "Generic (PLEG): container finished" podID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerID="af53cdca6be00d361acb6abf0b5978ddd065452652f23db0b4ad40494c33394b" exitCode=0 Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.861313 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" event={"ID":"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4","Type":"ContainerDied","Data":"af53cdca6be00d361acb6abf0b5978ddd065452652f23db0b4ad40494c33394b"} Jan 27 12:51:48 crc kubenswrapper[4900]: I0127 12:51:48.864508 4900 generic.go:334] "Generic (PLEG): container finished" podID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerID="8c43da896c9f4193b0949bf2d40b57eec81f0be251163c55686a409d54e75844" exitCode=143 Jan 27 12:51:48 crc 
kubenswrapper[4900]: I0127 12:51:48.864585 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5657957b69-ncpk6" event={"ID":"4c216bce-8265-4cdb-8104-6267c1196cc2","Type":"ContainerDied","Data":"8c43da896c9f4193b0949bf2d40b57eec81f0be251163c55686a409d54e75844"} Jan 27 12:51:49 crc kubenswrapper[4900]: I0127 12:51:49.892834 4900 generic.go:334] "Generic (PLEG): container finished" podID="f3331ea7-d796-459a-9e9d-6f744ba8822b" containerID="217dbd43523938257b0b4a51576f4dcb1e6b4682f707f56b660a481b1afa3131" exitCode=0 Jan 27 12:51:49 crc kubenswrapper[4900]: I0127 12:51:49.892887 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2rf6d" event={"ID":"f3331ea7-d796-459a-9e9d-6f744ba8822b","Type":"ContainerDied","Data":"217dbd43523938257b0b4a51576f4dcb1e6b4682f707f56b660a481b1afa3131"} Jan 27 12:51:50 crc kubenswrapper[4900]: I0127 12:51:50.981835 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:51 crc kubenswrapper[4900]: I0127 12:51:51.022409 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.368767 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-nwqdh" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.373051 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.373143 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.495287 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnfxx\" (UniqueName: \"kubernetes.io/projected/b5375696-4614-47d4-a8aa-2a98bdd0bd17-kube-api-access-fnfxx\") pod \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.495506 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-combined-ca-bundle\") pod \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.495608 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-config-data\") pod \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\" (UID: \"b5375696-4614-47d4-a8aa-2a98bdd0bd17\") " Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.506196 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5375696-4614-47d4-a8aa-2a98bdd0bd17-kube-api-access-fnfxx" (OuterVolumeSpecName: "kube-api-access-fnfxx") pod "b5375696-4614-47d4-a8aa-2a98bdd0bd17" (UID: 
"b5375696-4614-47d4-a8aa-2a98bdd0bd17"). InnerVolumeSpecName "kube-api-access-fnfxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.533694 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5375696-4614-47d4-a8aa-2a98bdd0bd17" (UID: "b5375696-4614-47d4-a8aa-2a98bdd0bd17"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.595097 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-config-data" (OuterVolumeSpecName: "config-data") pod "b5375696-4614-47d4-a8aa-2a98bdd0bd17" (UID: "b5375696-4614-47d4-a8aa-2a98bdd0bd17"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.602502 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnfxx\" (UniqueName: \"kubernetes.io/projected/b5375696-4614-47d4-a8aa-2a98bdd0bd17-kube-api-access-fnfxx\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.602548 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.602561 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5375696-4614-47d4-a8aa-2a98bdd0bd17-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.937804 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-nwqdh" event={"ID":"b5375696-4614-47d4-a8aa-2a98bdd0bd17","Type":"ContainerDied","Data":"6af01d0ff9437057bc43a241cc0816f10eb5343b21dbf3c72793b69ccb703aa6"} Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.937852 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6af01d0ff9437057bc43a241cc0816f10eb5343b21dbf3c72793b69ccb703aa6" Jan 27 12:51:52 crc kubenswrapper[4900]: I0127 12:51:52.937880 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-nwqdh" Jan 27 12:51:53 crc kubenswrapper[4900]: I0127 12:51:53.721276 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.198:5353: connect: connection refused" Jan 27 12:51:53 crc kubenswrapper[4900]: I0127 12:51:53.852557 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.291411 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-65d58d75c7-pn4zc"] Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.295533 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-65d58d75c7-pn4zc" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-api" containerID="cri-o://b8e2928c7635575c4f71d0b5ddd38074992cfe3a7c9b6ae9ab9edfcda36442e3" gracePeriod=30 Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.295608 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-65d58d75c7-pn4zc" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-httpd" containerID="cri-o://546ad62922eae7f52754cea6d7041fce4a43b8e635f8d55636f58925ee390b9f" gracePeriod=30 Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.303629 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-65d58d75c7-pn4zc" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.193:9696/\": EOF" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.324457 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-54df577b4f-lsr4f"] Jan 27 12:51:54 crc kubenswrapper[4900]: E0127 12:51:54.325318 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5375696-4614-47d4-a8aa-2a98bdd0bd17" containerName="heat-db-sync" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.325342 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5375696-4614-47d4-a8aa-2a98bdd0bd17" containerName="heat-db-sync" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.325656 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5375696-4614-47d4-a8aa-2a98bdd0bd17" containerName="heat-db-sync" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.330565 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.368568 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-54df577b4f-lsr4f"] Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.414408 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.450300 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-public-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.450428 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-internal-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.450457 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-ovndb-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.450556 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-config\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.450586 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mphl\" (UniqueName: \"kubernetes.io/projected/d1b3591f-db1d-4d36-b162-a667b95bd5e7-kube-api-access-5mphl\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.450631 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-combined-ca-bundle\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.450663 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-httpd-config\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.461434 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-c6f4bcb48-jgthx" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.555605 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-public-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.555811 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-internal-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.555835 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-ovndb-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.556023 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-config\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.574713 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-internal-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.576595 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-ovndb-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.577158 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mphl\" (UniqueName: \"kubernetes.io/projected/d1b3591f-db1d-4d36-b162-a667b95bd5e7-kube-api-access-5mphl\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.577284 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-combined-ca-bundle\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.577339 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-httpd-config\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.593312 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-public-tls-certs\") pod \"neutron-54df577b4f-lsr4f\" (UID: 
\"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.600434 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-httpd-config\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.600505 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-combined-ca-bundle\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.622540 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mphl\" (UniqueName: \"kubernetes.io/projected/d1b3591f-db1d-4d36-b162-a667b95bd5e7-kube-api-access-5mphl\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.626090 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d1b3591f-db1d-4d36-b162-a667b95bd5e7-config\") pod \"neutron-54df577b4f-lsr4f\" (UID: \"d1b3591f-db1d-4d36-b162-a667b95bd5e7\") " pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.660328 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-85c4fff8fd-7tgsz"] Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.660645 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-85c4fff8fd-7tgsz" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerName="barbican-api-log" containerID="cri-o://4719a19c48aae149ade5d7793c218bb246da50101d4c8add508ae19c3e7ec9c3" gracePeriod=30 Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.660708 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-85c4fff8fd-7tgsz" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerName="barbican-api" containerID="cri-o://01b7dc8bda1b099923b8339857bc48311738c75e7b4052871f1e1f9cbd624a83" gracePeriod=30 Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.690706 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.875011 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.898324 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3331ea7-d796-459a-9e9d-6f744ba8822b-etc-machine-id\") pod \"f3331ea7-d796-459a-9e9d-6f744ba8822b\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.898470 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-config-data\") pod \"f3331ea7-d796-459a-9e9d-6f744ba8822b\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.898527 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4hps\" (UniqueName: \"kubernetes.io/projected/f3331ea7-d796-459a-9e9d-6f744ba8822b-kube-api-access-j4hps\") pod \"f3331ea7-d796-459a-9e9d-6f744ba8822b\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.898586 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-db-sync-config-data\") pod \"f3331ea7-d796-459a-9e9d-6f744ba8822b\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.899378 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-combined-ca-bundle\") pod \"f3331ea7-d796-459a-9e9d-6f744ba8822b\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.899535 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-scripts\") pod \"f3331ea7-d796-459a-9e9d-6f744ba8822b\" (UID: \"f3331ea7-d796-459a-9e9d-6f744ba8822b\") " Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.907577 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3331ea7-d796-459a-9e9d-6f744ba8822b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f3331ea7-d796-459a-9e9d-6f744ba8822b" (UID: "f3331ea7-d796-459a-9e9d-6f744ba8822b"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.907629 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f3331ea7-d796-459a-9e9d-6f744ba8822b" (UID: "f3331ea7-d796-459a-9e9d-6f744ba8822b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.910301 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3331ea7-d796-459a-9e9d-6f744ba8822b-kube-api-access-j4hps" (OuterVolumeSpecName: "kube-api-access-j4hps") pod "f3331ea7-d796-459a-9e9d-6f744ba8822b" (UID: "f3331ea7-d796-459a-9e9d-6f744ba8822b"). InnerVolumeSpecName "kube-api-access-j4hps". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.920252 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-scripts" (OuterVolumeSpecName: "scripts") pod "f3331ea7-d796-459a-9e9d-6f744ba8822b" (UID: "f3331ea7-d796-459a-9e9d-6f744ba8822b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:54 crc kubenswrapper[4900]: I0127 12:51:54.997356 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-config-data" (OuterVolumeSpecName: "config-data") pod "f3331ea7-d796-459a-9e9d-6f744ba8822b" (UID: "f3331ea7-d796-459a-9e9d-6f744ba8822b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.004810 4900 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3331ea7-d796-459a-9e9d-6f744ba8822b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.004861 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.004871 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4hps\" (UniqueName: \"kubernetes.io/projected/f3331ea7-d796-459a-9e9d-6f744ba8822b-kube-api-access-j4hps\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.004883 4900 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.004891 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.011203 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-2rf6d" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.011811 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-2rf6d" event={"ID":"f3331ea7-d796-459a-9e9d-6f744ba8822b","Type":"ContainerDied","Data":"d0f00e23799dcec62273e3e0f763d5497430c94f1c62c277fe27ef1314a0e2d2"} Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.011841 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0f00e23799dcec62273e3e0f763d5497430c94f1c62c277fe27ef1314a0e2d2" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.018859 4900 generic.go:334] "Generic (PLEG): container finished" podID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerID="546ad62922eae7f52754cea6d7041fce4a43b8e635f8d55636f58925ee390b9f" exitCode=0 Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.018983 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65d58d75c7-pn4zc" event={"ID":"58051d5f-ca34-40db-9b7c-da0d095fa129","Type":"ContainerDied","Data":"546ad62922eae7f52754cea6d7041fce4a43b8e635f8d55636f58925ee390b9f"} Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.020895 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3331ea7-d796-459a-9e9d-6f744ba8822b" (UID: "f3331ea7-d796-459a-9e9d-6f744ba8822b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.024905 4900 generic.go:334] "Generic (PLEG): container finished" podID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerID="4719a19c48aae149ade5d7793c218bb246da50101d4c8add508ae19c3e7ec9c3" exitCode=143 Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.024976 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c4fff8fd-7tgsz" event={"ID":"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2","Type":"ContainerDied","Data":"4719a19c48aae149ade5d7793c218bb246da50101d4c8add508ae19c3e7ec9c3"} Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.111031 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3331ea7-d796-459a-9e9d-6f744ba8822b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: E0127 12:51:55.199155 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.257161 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.315366 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5nw6\" (UniqueName: \"kubernetes.io/projected/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-kube-api-access-q5nw6\") pod \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.315545 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-dns-svc\") pod \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.315848 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-nb\") pod \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.315900 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-config\") pod \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.315962 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-sb\") pod \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\" (UID: \"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4\") " Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.321185 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-kube-api-access-q5nw6" (OuterVolumeSpecName: "kube-api-access-q5nw6") pod "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" (UID: "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4"). InnerVolumeSpecName "kube-api-access-q5nw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.401779 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" (UID: "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.412374 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" (UID: "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.421530 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.421568 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.421579 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5nw6\" (UniqueName: \"kubernetes.io/projected/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-kube-api-access-q5nw6\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.441492 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-hr4zk"] Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.442085 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-config" (OuterVolumeSpecName: "config") pod "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" (UID: "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.504565 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" (UID: "2fb3a242-e7b9-45e6-bf61-7277e8c52cb4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.524097 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.524138 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.585430 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-54df577b4f-lsr4f"] Jan 27 12:51:55 crc kubenswrapper[4900]: I0127 12:51:55.643838 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkpt6"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.085887 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" event={"ID":"2fb3a242-e7b9-45e6-bf61-7277e8c52cb4","Type":"ContainerDied","Data":"e06ba6fa624b74f9c153b9430b19c596d739250fd10210ffd6338bb35c56a9f5"} Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.085963 4900 scope.go:117] "RemoveContainer" containerID="af53cdca6be00d361acb6abf0b5978ddd065452652f23db0b4ad40494c33394b" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.086277 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d649d8c65-gkl5t" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.090712 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" event={"ID":"0f50a23b-28bf-4815-beff-ed18fc15d800","Type":"ContainerStarted","Data":"3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a"} Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.090971 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" event={"ID":"0f50a23b-28bf-4815-beff-ed18fc15d800","Type":"ContainerStarted","Data":"b4b29cc88bbbd7133b3a3a13084b0e7708f62069acb1c1999a1984bdae67b885"} Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.169006 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed","Type":"ContainerStarted","Data":"e1e73f57cfb63a58f62aa6685b15c965dc148befa18221bf3caf02961db68a47"} Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.169291 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="ceilometer-notification-agent" containerID="cri-o://e4682ccb7862290593bfb7000b3d33b671743bff45ecae501e8cf40a8eeaaa86" gracePeriod=30 Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.169586 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.169683 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="proxy-httpd" containerID="cri-o://e1e73f57cfb63a58f62aa6685b15c965dc148befa18221bf3caf02961db68a47" gracePeriod=30 Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.169729 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="sg-core" containerID="cri-o://ce2accb87d865554efa37cb00ff0fd8f6420bee61fc34aa92160a5cbf58d16a2" gracePeriod=30 Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.203954 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-54df577b4f-lsr4f" event={"ID":"d1b3591f-db1d-4d36-b162-a667b95bd5e7","Type":"ContainerStarted","Data":"63bfded610cb323bfbd71b6f097728a6c4be30b558057fa9eec31154198d7242"} Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.204025 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-54df577b4f-lsr4f" event={"ID":"d1b3591f-db1d-4d36-b162-a667b95bd5e7","Type":"ContainerStarted","Data":"8e55a42eaac20ac13e841c2871e4f25594c605cb310ea82ea287a36a1240b900"} Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.215977 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:51:56 crc kubenswrapper[4900]: E0127 12:51:56.218388 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerName="dnsmasq-dns" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.218422 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerName="dnsmasq-dns" Jan 27 12:51:56 crc kubenswrapper[4900]: E0127 12:51:56.218458 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" 
containerName="init" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.218466 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerName="init" Jan 27 12:51:56 crc kubenswrapper[4900]: E0127 12:51:56.218493 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3331ea7-d796-459a-9e9d-6f744ba8822b" containerName="cinder-db-sync" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.218505 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3331ea7-d796-459a-9e9d-6f744ba8822b" containerName="cinder-db-sync" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.218937 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" containerName="dnsmasq-dns" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.218968 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3331ea7-d796-459a-9e9d-6f744ba8822b" containerName="cinder-db-sync" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.225077 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.237661 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.238437 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-ls755" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.239638 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkpt6" event={"ID":"a68b9744-48fb-4c32-ac22-7506057a0496","Type":"ContainerStarted","Data":"bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845"} Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.239814 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkpt6" event={"ID":"a68b9744-48fb-4c32-ac22-7506057a0496","Type":"ContainerStarted","Data":"6e83f09792263a33a7268e419250026f7bf44f71b4f6c55a5e0d1a85c27c38da"} Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.251476 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.256618 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.257677 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.257762 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-scripts\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.258309 4900 
reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.259617 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.260204 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6dd52811-14b2-4c08-99ac-a3018d0ae6de-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.260479 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.260871 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4wp9\" (UniqueName: \"kubernetes.io/projected/6dd52811-14b2-4c08-99ac-a3018d0ae6de-kube-api-access-h4wp9\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.355313 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d649d8c65-gkl5t"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.389427 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d649d8c65-gkl5t"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.365292 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6dd52811-14b2-4c08-99ac-a3018d0ae6de-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.389947 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.356299 4900 scope.go:117] "RemoveContainer" containerID="97c59a9978fa25a5c6166781a9958b79a4044783115272e29f9a4d4705040702" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.390239 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4wp9\" (UniqueName: \"kubernetes.io/projected/6dd52811-14b2-4c08-99ac-a3018d0ae6de-kube-api-access-h4wp9\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.390614 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.390763 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.390893 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-scripts\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.365361 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6dd52811-14b2-4c08-99ac-a3018d0ae6de-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.396090 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.400940 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-scripts\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.404822 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.419154 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.429129 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-hr4zk"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.480863 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4wp9\" (UniqueName: \"kubernetes.io/projected/6dd52811-14b2-4c08-99ac-a3018d0ae6de-kube-api-access-h4wp9\") pod \"cinder-scheduler-0\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.666373 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.848361 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fb3a242-e7b9-45e6-bf61-7277e8c52cb4" path="/var/lib/kubelet/pods/2fb3a242-e7b9-45e6-bf61-7277e8c52cb4/volumes" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.849331 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-89nlm"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.959848 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-89nlm"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.960713 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.975309 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.978133 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-65d58d75c7-pn4zc" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.193:9696/\": dial tcp 10.217.0.193:9696: connect: connection refused" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.979445 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.979612 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 27 12:51:56 crc kubenswrapper[4900]: I0127 12:51:56.983759 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.008859 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.008930 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009049 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e07faff-922c-4bb3-a09e-e11135b3e369-logs\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009115 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z62zp\" (UniqueName: \"kubernetes.io/projected/67de2b7f-64d0-4a97-b742-18fc0abb827c-kube-api-access-z62zp\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009134 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-scripts\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009214 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009318 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t85pv\" (UniqueName: \"kubernetes.io/projected/5e07faff-922c-4bb3-a09e-e11135b3e369-kube-api-access-t85pv\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009395 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-svc\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009417 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data-custom\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009453 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5e07faff-922c-4bb3-a09e-e11135b3e369-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009471 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-config\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009492 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.009531 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.119340 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.119497 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.119635 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e07faff-922c-4bb3-a09e-e11135b3e369-logs\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.119745 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-scripts\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.119778 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z62zp\" (UniqueName: \"kubernetes.io/projected/67de2b7f-64d0-4a97-b742-18fc0abb827c-kube-api-access-z62zp\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.119951 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.120171 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t85pv\" (UniqueName: \"kubernetes.io/projected/5e07faff-922c-4bb3-a09e-e11135b3e369-kube-api-access-t85pv\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.120290 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data-custom\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.120319 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-svc\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.120378 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5e07faff-922c-4bb3-a09e-e11135b3e369-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 
12:51:57.120404 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-config\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.120455 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.120525 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.124615 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e07faff-922c-4bb3-a09e-e11135b3e369-logs\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.125358 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.127306 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5e07faff-922c-4bb3-a09e-e11135b3e369-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.128040 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-config\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.132962 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.133853 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.135628 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-svc\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: 
\"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.136598 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.136883 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data-custom\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.144562 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-scripts\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.147759 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.165234 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t85pv\" (UniqueName: \"kubernetes.io/projected/5e07faff-922c-4bb3-a09e-e11135b3e369-kube-api-access-t85pv\") pod \"cinder-api-0\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.176730 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z62zp\" (UniqueName: \"kubernetes.io/projected/67de2b7f-64d0-4a97-b742-18fc0abb827c-kube-api-access-z62zp\") pod \"dnsmasq-dns-5784cf869f-89nlm\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.284665 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-54df577b4f-lsr4f" event={"ID":"d1b3591f-db1d-4d36-b162-a667b95bd5e7","Type":"ContainerStarted","Data":"c2f3c87efbd2cf6514e56c2487e04369efe73c6a7037c8c0eb72edc29915b984"} Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.284863 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-54df577b4f-lsr4f" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.293691 4900 generic.go:334] "Generic (PLEG): container finished" podID="a68b9744-48fb-4c32-ac22-7506057a0496" containerID="bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845" exitCode=0 Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.293782 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkpt6" event={"ID":"a68b9744-48fb-4c32-ac22-7506057a0496","Type":"ContainerDied","Data":"bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845"} Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.325784 4900 generic.go:334] "Generic (PLEG): container finished" podID="0f50a23b-28bf-4815-beff-ed18fc15d800" 
containerID="3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a" exitCode=0 Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.326051 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" podUID="0f50a23b-28bf-4815-beff-ed18fc15d800" containerName="dnsmasq-dns" containerID="cri-o://cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934" gracePeriod=10 Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.326147 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" event={"ID":"0f50a23b-28bf-4815-beff-ed18fc15d800","Type":"ContainerDied","Data":"3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a"} Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.326173 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" event={"ID":"0f50a23b-28bf-4815-beff-ed18fc15d800","Type":"ContainerStarted","Data":"cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934"} Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.326201 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.343392 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-54df577b4f-lsr4f" podStartSLOduration=3.343361311 podStartE2EDuration="3.343361311s" podCreationTimestamp="2026-01-27 12:51:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:57.30970606 +0000 UTC m=+1544.546734270" watchObservedRunningTime="2026-01-27 12:51:57.343361311 +0000 UTC m=+1544.580389521" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.348341 4900 generic.go:334] "Generic (PLEG): container finished" podID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerID="e1e73f57cfb63a58f62aa6685b15c965dc148befa18221bf3caf02961db68a47" exitCode=0 Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.348387 4900 generic.go:334] "Generic (PLEG): container finished" podID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerID="ce2accb87d865554efa37cb00ff0fd8f6420bee61fc34aa92160a5cbf58d16a2" exitCode=2 Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.348415 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed","Type":"ContainerDied","Data":"e1e73f57cfb63a58f62aa6685b15c965dc148befa18221bf3caf02961db68a47"} Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.348479 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed","Type":"ContainerDied","Data":"ce2accb87d865554efa37cb00ff0fd8f6420bee61fc34aa92160a5cbf58d16a2"} Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.371851 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" podStartSLOduration=9.371823551 podStartE2EDuration="9.371823551s" podCreationTimestamp="2026-01-27 12:51:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:51:57.353712103 +0000 UTC m=+1544.590740313" watchObservedRunningTime="2026-01-27 12:51:57.371823551 +0000 UTC m=+1544.608851761" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.432038 
4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.459905 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 27 12:51:57 crc kubenswrapper[4900]: I0127 12:51:57.709189 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.234068 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.275901 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-nb\") pod \"0f50a23b-28bf-4815-beff-ed18fc15d800\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.277224 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-sb\") pod \"0f50a23b-28bf-4815-beff-ed18fc15d800\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.277292 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-swift-storage-0\") pod \"0f50a23b-28bf-4815-beff-ed18fc15d800\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.277419 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-svc\") pod \"0f50a23b-28bf-4815-beff-ed18fc15d800\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.278126 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-config\") pod \"0f50a23b-28bf-4815-beff-ed18fc15d800\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.278218 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssx9p\" (UniqueName: \"kubernetes.io/projected/0f50a23b-28bf-4815-beff-ed18fc15d800-kube-api-access-ssx9p\") pod \"0f50a23b-28bf-4815-beff-ed18fc15d800\" (UID: \"0f50a23b-28bf-4815-beff-ed18fc15d800\") " Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.314359 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f50a23b-28bf-4815-beff-ed18fc15d800-kube-api-access-ssx9p" (OuterVolumeSpecName: "kube-api-access-ssx9p") pod "0f50a23b-28bf-4815-beff-ed18fc15d800" (UID: "0f50a23b-28bf-4815-beff-ed18fc15d800"). InnerVolumeSpecName "kube-api-access-ssx9p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.412350 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssx9p\" (UniqueName: \"kubernetes.io/projected/0f50a23b-28bf-4815-beff-ed18fc15d800-kube-api-access-ssx9p\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.440190 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0f50a23b-28bf-4815-beff-ed18fc15d800" (UID: "0f50a23b-28bf-4815-beff-ed18fc15d800"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.500078 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0f50a23b-28bf-4815-beff-ed18fc15d800" (UID: "0f50a23b-28bf-4815-beff-ed18fc15d800"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.504483 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-config" (OuterVolumeSpecName: "config") pod "0f50a23b-28bf-4815-beff-ed18fc15d800" (UID: "0f50a23b-28bf-4815-beff-ed18fc15d800"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.510140 4900 generic.go:334] "Generic (PLEG): container finished" podID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerID="01b7dc8bda1b099923b8339857bc48311738c75e7b4052871f1e1f9cbd624a83" exitCode=0 Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.540997 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.541261 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.541373 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.542763 4900 generic.go:334] "Generic (PLEG): container finished" podID="0f50a23b-28bf-4815-beff-ed18fc15d800" containerID="cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934" exitCode=0 Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.543002 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.572468 4900 generic.go:334] "Generic (PLEG): container finished" podID="a68b9744-48fb-4c32-ac22-7506057a0496" containerID="514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50" exitCode=0 Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.623789 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0f50a23b-28bf-4815-beff-ed18fc15d800" (UID: "0f50a23b-28bf-4815-beff-ed18fc15d800"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.647671 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.678346 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0f50a23b-28bf-4815-beff-ed18fc15d800" (UID: "0f50a23b-28bf-4815-beff-ed18fc15d800"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.754212 4900 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0f50a23b-28bf-4815-beff-ed18fc15d800-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.785863 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c4fff8fd-7tgsz" event={"ID":"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2","Type":"ContainerDied","Data":"01b7dc8bda1b099923b8339857bc48311738c75e7b4052871f1e1f9cbd624a83"} Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.785907 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" event={"ID":"0f50a23b-28bf-4815-beff-ed18fc15d800","Type":"ContainerDied","Data":"cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934"} Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.785931 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-hr4zk" event={"ID":"0f50a23b-28bf-4815-beff-ed18fc15d800","Type":"ContainerDied","Data":"b4b29cc88bbbd7133b3a3a13084b0e7708f62069acb1c1999a1984bdae67b885"} Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.785943 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-89nlm"] Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.785959 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkpt6" event={"ID":"a68b9744-48fb-4c32-ac22-7506057a0496","Type":"ContainerDied","Data":"514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50"} Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.785973 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6dd52811-14b2-4c08-99ac-a3018d0ae6de","Type":"ContainerStarted","Data":"0e5ba15cda338ebbe7e9c061eeabe70d573815862da1ac5057f6a973dadd1362"} Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.785996 4900 scope.go:117] 
"RemoveContainer" containerID="cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934" Jan 27 12:51:58 crc kubenswrapper[4900]: I0127 12:51:58.793875 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.018596 4900 scope.go:117] "RemoveContainer" containerID="3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.132883 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.164260 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-hr4zk"] Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.196017 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-hr4zk"] Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.273416 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-logs\") pod \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.273573 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data\") pod \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.273607 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data-custom\") pod \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.273723 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j955b\" (UniqueName: \"kubernetes.io/projected/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-kube-api-access-j955b\") pod \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.273881 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-combined-ca-bundle\") pod \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\" (UID: \"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2\") " Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.275188 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-logs" (OuterVolumeSpecName: "logs") pod "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" (UID: "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.294294 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" (UID: "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.326782 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-kube-api-access-j955b" (OuterVolumeSpecName: "kube-api-access-j955b") pod "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" (UID: "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2"). InnerVolumeSpecName "kube-api-access-j955b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.328496 4900 scope.go:117] "RemoveContainer" containerID="cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934" Jan 27 12:51:59 crc kubenswrapper[4900]: E0127 12:51:59.332811 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934\": container with ID starting with cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934 not found: ID does not exist" containerID="cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.332881 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934"} err="failed to get container status \"cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934\": rpc error: code = NotFound desc = could not find container \"cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934\": container with ID starting with cf096aca7a63483526aff4d6b83764767376ea65205c4e7c26ff8b7ea607e934 not found: ID does not exist" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.332915 4900 scope.go:117] "RemoveContainer" containerID="3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a" Jan 27 12:51:59 crc kubenswrapper[4900]: E0127 12:51:59.342306 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a\": container with ID starting with 3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a not found: ID does not exist" containerID="3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.342370 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a"} err="failed to get container status \"3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a\": rpc error: code = NotFound desc = could not find container \"3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a\": container with ID starting with 3e1aed1e8e1c86188dc9de6432c5e50a2b2c9833bcb39de08c38855aa5d0305a not found: ID does not exist" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.441807 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" (UID: "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.448215 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.448269 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.448289 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j955b\" (UniqueName: \"kubernetes.io/projected/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-kube-api-access-j955b\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.448301 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.453939 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.461931 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-c6f4bcb48-jgthx" podUID="2550873b-f8d1-4bfe-8155-64f7d0929058" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.203:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.581442 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data" (OuterVolumeSpecName: "config-data") pod "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" (UID: "e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.663593 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.675226 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-85c4fff8fd-7tgsz" event={"ID":"e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2","Type":"ContainerDied","Data":"5beffe6f22919ef74c77d1d94da786d8b46e601bc66b93fff77410f1e51c3271"} Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.675307 4900 scope.go:117] "RemoveContainer" containerID="01b7dc8bda1b099923b8339857bc48311738c75e7b4052871f1e1f9cbd624a83" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.675539 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-85c4fff8fd-7tgsz" Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.692403 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5e07faff-922c-4bb3-a09e-e11135b3e369","Type":"ContainerStarted","Data":"4b2ea7c74f814a052eb2353cda47350da1308fd9b0dc66ee0f4e4afe50d89fe1"} Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.726112 4900 generic.go:334] "Generic (PLEG): container finished" podID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerID="e4682ccb7862290593bfb7000b3d33b671743bff45ecae501e8cf40a8eeaaa86" exitCode=0 Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.726300 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed","Type":"ContainerDied","Data":"e4682ccb7862290593bfb7000b3d33b671743bff45ecae501e8cf40a8eeaaa86"} Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.730615 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" event={"ID":"67de2b7f-64d0-4a97-b742-18fc0abb827c","Type":"ContainerStarted","Data":"58eeec6cfa481b3a6b202e56b634ca5fef9161911760cfcb4c236be9a9054ac3"} Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.791617 4900 generic.go:334] "Generic (PLEG): container finished" podID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerID="b8e2928c7635575c4f71d0b5ddd38074992cfe3a7c9b6ae9ab9edfcda36442e3" exitCode=0 Jan 27 12:51:59 crc kubenswrapper[4900]: I0127 12:51:59.791674 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65d58d75c7-pn4zc" event={"ID":"58051d5f-ca34-40db-9b7c-da0d095fa129","Type":"ContainerDied","Data":"b8e2928c7635575c4f71d0b5ddd38074992cfe3a7c9b6ae9ab9edfcda36442e3"} Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.024405 4900 scope.go:117] "RemoveContainer" containerID="4719a19c48aae149ade5d7793c218bb246da50101d4c8add508ae19c3e7ec9c3" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.037933 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.105257 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.108127 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-log-httpd\") pod \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.108358 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-combined-ca-bundle\") pod \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.109703 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" (UID: "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.112023 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-run-httpd\") pod \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.112095 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-sg-core-conf-yaml\") pod \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.112157 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nczbn\" (UniqueName: \"kubernetes.io/projected/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-kube-api-access-nczbn\") pod \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.112256 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-config-data\") pod \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.112370 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-scripts\") pod \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\" (UID: \"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.113381 4900 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.114926 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" (UID: "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.136758 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-scripts" (OuterVolumeSpecName: "scripts") pod "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" (UID: "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.145680 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-kube-api-access-nczbn" (OuterVolumeSpecName: "kube-api-access-nczbn") pod "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" (UID: "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed"). InnerVolumeSpecName "kube-api-access-nczbn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.174561 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-85c4fff8fd-7tgsz"] Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.204633 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-85c4fff8fd-7tgsz"] Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.206154 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" (UID: "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.216330 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-config\") pod \"58051d5f-ca34-40db-9b7c-da0d095fa129\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.216663 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-combined-ca-bundle\") pod \"58051d5f-ca34-40db-9b7c-da0d095fa129\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.217178 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-ovndb-tls-certs\") pod \"58051d5f-ca34-40db-9b7c-da0d095fa129\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.217474 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-public-tls-certs\") pod \"58051d5f-ca34-40db-9b7c-da0d095fa129\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.217826 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-httpd-config\") pod \"58051d5f-ca34-40db-9b7c-da0d095fa129\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.217968 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zwdq\" (UniqueName: \"kubernetes.io/projected/58051d5f-ca34-40db-9b7c-da0d095fa129-kube-api-access-7zwdq\") pod \"58051d5f-ca34-40db-9b7c-da0d095fa129\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.218121 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-internal-tls-certs\") pod \"58051d5f-ca34-40db-9b7c-da0d095fa129\" (UID: \"58051d5f-ca34-40db-9b7c-da0d095fa129\") " Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.219183 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-scripts\") on node \"crc\" DevicePath 
\"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.219306 4900 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.219373 4900 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.219439 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nczbn\" (UniqueName: \"kubernetes.io/projected/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-kube-api-access-nczbn\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.253251 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "58051d5f-ca34-40db-9b7c-da0d095fa129" (UID: "58051d5f-ca34-40db-9b7c-da0d095fa129"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.271804 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58051d5f-ca34-40db-9b7c-da0d095fa129-kube-api-access-7zwdq" (OuterVolumeSpecName: "kube-api-access-7zwdq") pod "58051d5f-ca34-40db-9b7c-da0d095fa129" (UID: "58051d5f-ca34-40db-9b7c-da0d095fa129"). InnerVolumeSpecName "kube-api-access-7zwdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.326426 4900 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.326465 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zwdq\" (UniqueName: \"kubernetes.io/projected/58051d5f-ca34-40db-9b7c-da0d095fa129-kube-api-access-7zwdq\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.507420 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f50a23b-28bf-4815-beff-ed18fc15d800" path="/var/lib/kubelet/pods/0f50a23b-28bf-4815-beff-ed18fc15d800/volumes" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.508855 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" path="/var/lib/kubelet/pods/e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2/volumes" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.583305 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "58051d5f-ca34-40db-9b7c-da0d095fa129" (UID: "58051d5f-ca34-40db-9b7c-da0d095fa129"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.619592 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" (UID: "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.637550 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "58051d5f-ca34-40db-9b7c-da0d095fa129" (UID: "58051d5f-ca34-40db-9b7c-da0d095fa129"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.643617 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.643685 4900 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.643702 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.673282 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-config" (OuterVolumeSpecName: "config") pod "58051d5f-ca34-40db-9b7c-da0d095fa129" (UID: "58051d5f-ca34-40db-9b7c-da0d095fa129"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.682200 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-config-data" (OuterVolumeSpecName: "config-data") pod "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" (UID: "a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.696414 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "58051d5f-ca34-40db-9b7c-da0d095fa129" (UID: "58051d5f-ca34-40db-9b7c-da0d095fa129"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.709518 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "58051d5f-ca34-40db-9b7c-da0d095fa129" (UID: "58051d5f-ca34-40db-9b7c-da0d095fa129"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.746511 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.746552 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.746564 4900 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.746585 4900 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58051d5f-ca34-40db-9b7c-da0d095fa129-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.843212 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkpt6" event={"ID":"a68b9744-48fb-4c32-ac22-7506057a0496","Type":"ContainerStarted","Data":"759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16"} Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.843285 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5e07faff-922c-4bb3-a09e-e11135b3e369","Type":"ContainerStarted","Data":"5faba2fc732b648231d6569a8f588e69834f668a745435fe267df4576c220e15"} Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.852208 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed","Type":"ContainerDied","Data":"ebab9cbb7c23697eda46b670ed9e929b66b719ceb4ed7fcfcfe16397ade522f3"} Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.852287 4900 scope.go:117] "RemoveContainer" containerID="e1e73f57cfb63a58f62aa6685b15c965dc148befa18221bf3caf02961db68a47" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.852467 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.871892 4900 generic.go:334] "Generic (PLEG): container finished" podID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerID="bf4aea68e3588cbac8bebf035b3571e756c2e37efb375bffdcf2ff3edf93d4ac" exitCode=0 Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.872002 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" event={"ID":"67de2b7f-64d0-4a97-b742-18fc0abb827c","Type":"ContainerDied","Data":"bf4aea68e3588cbac8bebf035b3571e756c2e37efb375bffdcf2ff3edf93d4ac"} Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.872191 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" event={"ID":"67de2b7f-64d0-4a97-b742-18fc0abb827c","Type":"ContainerStarted","Data":"d38f55dfc8e94aa9f8e511c1d2ea8c1cff244e9f2614345e5eeb1ebad0610637"} Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.872465 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.884937 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lkpt6" podStartSLOduration=10.564265871 podStartE2EDuration="13.884902962s" podCreationTimestamp="2026-01-27 12:51:47 +0000 UTC" firstStartedPulling="2026-01-27 12:51:56.306008452 +0000 UTC m=+1543.543036662" lastFinishedPulling="2026-01-27 12:51:59.626645533 +0000 UTC m=+1546.863673753" observedRunningTime="2026-01-27 12:52:00.848158961 +0000 UTC m=+1548.085187171" watchObservedRunningTime="2026-01-27 12:52:00.884902962 +0000 UTC m=+1548.121931172" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.887091 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-65d58d75c7-pn4zc" event={"ID":"58051d5f-ca34-40db-9b7c-da0d095fa129","Type":"ContainerDied","Data":"892a25fdbbf96f8a19a5a35d81b533d21243c6f60fbf39d7c6fc8f3867d6f0bb"} Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.887283 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-65d58d75c7-pn4zc" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.907484 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" podStartSLOduration=4.907462109 podStartE2EDuration="4.907462109s" podCreationTimestamp="2026-01-27 12:51:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:00.905587545 +0000 UTC m=+1548.142615755" watchObservedRunningTime="2026-01-27 12:52:00.907462109 +0000 UTC m=+1548.144490319" Jan 27 12:52:00 crc kubenswrapper[4900]: I0127 12:52:00.951359 4900 scope.go:117] "RemoveContainer" containerID="ce2accb87d865554efa37cb00ff0fd8f6420bee61fc34aa92160a5cbf58d16a2" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.081926 4900 scope.go:117] "RemoveContainer" containerID="e4682ccb7862290593bfb7000b3d33b671743bff45ecae501e8cf40a8eeaaa86" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.104888 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-65d58d75c7-pn4zc"] Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.155677 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-65d58d75c7-pn4zc"] Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.174243 4900 scope.go:117] "RemoveContainer" containerID="546ad62922eae7f52754cea6d7041fce4a43b8e635f8d55636f58925ee390b9f" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.208414 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.248707 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.273076 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:52:01 crc kubenswrapper[4900]: E0127 12:52:01.273906 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f50a23b-28bf-4815-beff-ed18fc15d800" containerName="dnsmasq-dns" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.273931 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f50a23b-28bf-4815-beff-ed18fc15d800" containerName="dnsmasq-dns" Jan 27 12:52:01 crc kubenswrapper[4900]: E0127 12:52:01.273942 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-httpd" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.273948 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-httpd" Jan 27 12:52:01 crc kubenswrapper[4900]: E0127 12:52:01.273970 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="ceilometer-notification-agent" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.273976 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="ceilometer-notification-agent" Jan 27 12:52:01 crc kubenswrapper[4900]: E0127 12:52:01.273986 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f50a23b-28bf-4815-beff-ed18fc15d800" containerName="init" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.273992 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f50a23b-28bf-4815-beff-ed18fc15d800" containerName="init" Jan 27 12:52:01 crc 
kubenswrapper[4900]: E0127 12:52:01.274024 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="proxy-httpd" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274030 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="proxy-httpd" Jan 27 12:52:01 crc kubenswrapper[4900]: E0127 12:52:01.274050 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerName="barbican-api-log" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274105 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerName="barbican-api-log" Jan 27 12:52:01 crc kubenswrapper[4900]: E0127 12:52:01.274119 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerName="barbican-api" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274125 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerName="barbican-api" Jan 27 12:52:01 crc kubenswrapper[4900]: E0127 12:52:01.274135 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="sg-core" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274141 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="sg-core" Jan 27 12:52:01 crc kubenswrapper[4900]: E0127 12:52:01.274155 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-api" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274161 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-api" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274399 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="proxy-httpd" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274416 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="sg-core" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274429 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" containerName="ceilometer-notification-agent" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274445 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerName="barbican-api-log" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274454 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f50a23b-28bf-4815-beff-ed18fc15d800" containerName="dnsmasq-dns" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274461 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-api" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274467 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2df37ae-b2dc-4d2d-8746-5b4cd29ceea2" containerName="barbican-api" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.274478 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" containerName="neutron-httpd" Jan 27 
12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.285407 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.288414 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.288843 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.307747 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.320094 4900 scope.go:117] "RemoveContainer" containerID="b8e2928c7635575c4f71d0b5ddd38074992cfe3a7c9b6ae9ab9edfcda36442e3" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.419147 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-run-httpd\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.419405 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcwpp\" (UniqueName: \"kubernetes.io/projected/33045450-fef6-4a24-a926-c91d19c2fe02-kube-api-access-tcwpp\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.419567 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-scripts\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.419633 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-config-data\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.419846 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-log-httpd\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.419928 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.420097 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.522965 4900 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.523124 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-run-httpd\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.523220 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcwpp\" (UniqueName: \"kubernetes.io/projected/33045450-fef6-4a24-a926-c91d19c2fe02-kube-api-access-tcwpp\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.523277 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-scripts\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.523306 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-config-data\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.523420 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-log-httpd\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.523465 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.525177 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-run-httpd\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.525909 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-log-httpd\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.536012 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-scripts\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.536499 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-config-data\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.536720 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.542992 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.565392 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcwpp\" (UniqueName: \"kubernetes.io/projected/33045450-fef6-4a24-a926-c91d19c2fe02-kube-api-access-tcwpp\") pod \"ceilometer-0\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.616740 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.942652 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5e07faff-922c-4bb3-a09e-e11135b3e369","Type":"ContainerStarted","Data":"e37ceed8d91756b720f2c156156acea0cdf7a311ddadcd863a77eb716eed968f"} Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.943395 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api-log" containerID="cri-o://5faba2fc732b648231d6569a8f588e69834f668a745435fe267df4576c220e15" gracePeriod=30 Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.943880 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.944005 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api" containerID="cri-o://e37ceed8d91756b720f2c156156acea0cdf7a311ddadcd863a77eb716eed968f" gracePeriod=30 Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.979395 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.979372427 podStartE2EDuration="5.979372427s" podCreationTimestamp="2026-01-27 12:51:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:01.965587455 +0000 UTC m=+1549.202615665" watchObservedRunningTime="2026-01-27 12:52:01.979372427 +0000 UTC m=+1549.216400637" Jan 27 12:52:01 crc kubenswrapper[4900]: I0127 12:52:01.984805 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6dd52811-14b2-4c08-99ac-a3018d0ae6de","Type":"ContainerStarted","Data":"ecfb9c3fbd100145b02f69dd08f9641b6030095c4ff2be3fe213ccce8741af35"} Jan 27 12:52:02 crc kubenswrapper[4900]: I0127 12:52:02.182486 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ceilometer-0"] Jan 27 12:52:02 crc kubenswrapper[4900]: I0127 12:52:02.502166 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58051d5f-ca34-40db-9b7c-da0d095fa129" path="/var/lib/kubelet/pods/58051d5f-ca34-40db-9b7c-da0d095fa129/volumes" Jan 27 12:52:02 crc kubenswrapper[4900]: I0127 12:52:02.503819 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed" path="/var/lib/kubelet/pods/a68933a3-7eb5-43a9-85fd-2a2cc68bc0ed/volumes" Jan 27 12:52:02 crc kubenswrapper[4900]: I0127 12:52:02.999331 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerStarted","Data":"d18819e0069d49a28d9cec8272e82136f3b390380a58369a4f08d2d98da08f9a"} Jan 27 12:52:02 crc kubenswrapper[4900]: I0127 12:52:02.999400 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerStarted","Data":"2a5332746527779a21ac8f28d29d255b73addf451a262048cd4ab802425334eb"} Jan 27 12:52:03 crc kubenswrapper[4900]: I0127 12:52:03.001981 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6dd52811-14b2-4c08-99ac-a3018d0ae6de","Type":"ContainerStarted","Data":"1adcb6eef9bcf9ac83993f9a522c6199ab89db4b69bd95881470ec36c7cc2e1c"} Jan 27 12:52:03 crc kubenswrapper[4900]: I0127 12:52:03.005296 4900 generic.go:334] "Generic (PLEG): container finished" podID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerID="5faba2fc732b648231d6569a8f588e69834f668a745435fe267df4576c220e15" exitCode=143 Jan 27 12:52:03 crc kubenswrapper[4900]: I0127 12:52:03.005411 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5e07faff-922c-4bb3-a09e-e11135b3e369","Type":"ContainerDied","Data":"5faba2fc732b648231d6569a8f588e69834f668a745435fe267df4576c220e15"} Jan 27 12:52:03 crc kubenswrapper[4900]: I0127 12:52:03.052904 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.445516054 podStartE2EDuration="7.052872601s" podCreationTimestamp="2026-01-27 12:51:56 +0000 UTC" firstStartedPulling="2026-01-27 12:51:57.735009458 +0000 UTC m=+1544.972037668" lastFinishedPulling="2026-01-27 12:51:59.342366005 +0000 UTC m=+1546.579394215" observedRunningTime="2026-01-27 12:52:03.031755376 +0000 UTC m=+1550.268783606" watchObservedRunningTime="2026-01-27 12:52:03.052872601 +0000 UTC m=+1550.289900811" Jan 27 12:52:04 crc kubenswrapper[4900]: I0127 12:52:04.046205 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerStarted","Data":"255c80b80399b7f9ab2b447d472f0a9004a1e964b6a080dff6a20e18fa89fdf0"} Jan 27 12:52:04 crc kubenswrapper[4900]: I0127 12:52:04.638286 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:52:04 crc kubenswrapper[4900]: I0127 12:52:04.711006 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-74d8686b8d-zh4mj" Jan 27 12:52:05 crc kubenswrapper[4900]: I0127 12:52:05.062930 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerStarted","Data":"e8efa6c66a1b88295c17777f8906520c4063eb3e6714590fae2fe2c1e524121c"} Jan 27 12:52:06 crc kubenswrapper[4900]: I0127 12:52:06.077018 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerStarted","Data":"58bf39b05777fadec75f938969df1684c256309712db6ae44ef0d01e2a406b7e"} Jan 27 12:52:06 crc kubenswrapper[4900]: I0127 12:52:06.079929 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 12:52:06 crc kubenswrapper[4900]: I0127 12:52:06.106233 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.7630651130000001 podStartE2EDuration="5.10620788s" podCreationTimestamp="2026-01-27 12:52:01 +0000 UTC" firstStartedPulling="2026-01-27 12:52:02.18801433 +0000 UTC m=+1549.425042540" lastFinishedPulling="2026-01-27 12:52:05.531157087 +0000 UTC m=+1552.768185307" observedRunningTime="2026-01-27 12:52:06.10000814 +0000 UTC m=+1553.337036370" watchObservedRunningTime="2026-01-27 12:52:06.10620788 +0000 UTC m=+1553.343236090" Jan 27 12:52:06 crc kubenswrapper[4900]: I0127 12:52:06.668264 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 27 12:52:06 crc kubenswrapper[4900]: I0127 12:52:06.901835 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 27 12:52:07 crc kubenswrapper[4900]: I0127 12:52:07.139314 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:52:07 crc kubenswrapper[4900]: I0127 12:52:07.395984 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:52:07 crc kubenswrapper[4900]: I0127 12:52:07.396128 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:52:07 crc kubenswrapper[4900]: I0127 12:52:07.434140 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:52:07 crc kubenswrapper[4900]: I0127 12:52:07.471462 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:52:07 crc kubenswrapper[4900]: I0127 12:52:07.521567 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-qbm9l"] Jan 27 12:52:07 crc kubenswrapper[4900]: I0127 12:52:07.521844 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" podUID="47942ee1-102a-42b0-a25a-e41b9a020d95" containerName="dnsmasq-dns" containerID="cri-o://e657aeacbe8e5e63fbbf129c3ed6c2019d7448ad183596ce5fe4a231cfa6538c" gracePeriod=10 Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.102948 4900 generic.go:334] "Generic (PLEG): container finished" podID="47942ee1-102a-42b0-a25a-e41b9a020d95" containerID="e657aeacbe8e5e63fbbf129c3ed6c2019d7448ad183596ce5fe4a231cfa6538c" exitCode=0 Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.103231 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerName="cinder-scheduler" 
containerID="cri-o://ecfb9c3fbd100145b02f69dd08f9641b6030095c4ff2be3fe213ccce8741af35" gracePeriod=30 Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.103338 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" event={"ID":"47942ee1-102a-42b0-a25a-e41b9a020d95","Type":"ContainerDied","Data":"e657aeacbe8e5e63fbbf129c3ed6c2019d7448ad183596ce5fe4a231cfa6538c"} Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.105279 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerName="probe" containerID="cri-o://1adcb6eef9bcf9ac83993f9a522c6199ab89db4b69bd95881470ec36c7cc2e1c" gracePeriod=30 Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.156705 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.230358 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.230380 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkpt6"] Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.350789 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6wpb\" (UniqueName: \"kubernetes.io/projected/47942ee1-102a-42b0-a25a-e41b9a020d95-kube-api-access-c6wpb\") pod \"47942ee1-102a-42b0-a25a-e41b9a020d95\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.350887 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-config\") pod \"47942ee1-102a-42b0-a25a-e41b9a020d95\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.351075 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-nb\") pod \"47942ee1-102a-42b0-a25a-e41b9a020d95\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.353247 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-sb\") pod \"47942ee1-102a-42b0-a25a-e41b9a020d95\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.353348 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-dns-svc\") pod \"47942ee1-102a-42b0-a25a-e41b9a020d95\" (UID: \"47942ee1-102a-42b0-a25a-e41b9a020d95\") " Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.374164 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47942ee1-102a-42b0-a25a-e41b9a020d95-kube-api-access-c6wpb" (OuterVolumeSpecName: "kube-api-access-c6wpb") pod "47942ee1-102a-42b0-a25a-e41b9a020d95" (UID: "47942ee1-102a-42b0-a25a-e41b9a020d95"). InnerVolumeSpecName "kube-api-access-c6wpb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.445594 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-config" (OuterVolumeSpecName: "config") pod "47942ee1-102a-42b0-a25a-e41b9a020d95" (UID: "47942ee1-102a-42b0-a25a-e41b9a020d95"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.451495 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "47942ee1-102a-42b0-a25a-e41b9a020d95" (UID: "47942ee1-102a-42b0-a25a-e41b9a020d95"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.457660 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6wpb\" (UniqueName: \"kubernetes.io/projected/47942ee1-102a-42b0-a25a-e41b9a020d95-kube-api-access-c6wpb\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.457703 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.457716 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.460153 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "47942ee1-102a-42b0-a25a-e41b9a020d95" (UID: "47942ee1-102a-42b0-a25a-e41b9a020d95"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.465563 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "47942ee1-102a-42b0-a25a-e41b9a020d95" (UID: "47942ee1-102a-42b0-a25a-e41b9a020d95"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.560826 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:08 crc kubenswrapper[4900]: I0127 12:52:08.560870 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/47942ee1-102a-42b0-a25a-e41b9a020d95-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:09 crc kubenswrapper[4900]: I0127 12:52:09.117987 4900 generic.go:334] "Generic (PLEG): container finished" podID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerID="1adcb6eef9bcf9ac83993f9a522c6199ab89db4b69bd95881470ec36c7cc2e1c" exitCode=0 Jan 27 12:52:09 crc kubenswrapper[4900]: I0127 12:52:09.118114 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6dd52811-14b2-4c08-99ac-a3018d0ae6de","Type":"ContainerDied","Data":"1adcb6eef9bcf9ac83993f9a522c6199ab89db4b69bd95881470ec36c7cc2e1c"} Jan 27 12:52:09 crc kubenswrapper[4900]: I0127 12:52:09.120745 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" event={"ID":"47942ee1-102a-42b0-a25a-e41b9a020d95","Type":"ContainerDied","Data":"709bf15b1cdf3bd5e8051079d9f5cf35bc66044e13b4323a797e7c1fde4066a1"} Jan 27 12:52:09 crc kubenswrapper[4900]: I0127 12:52:09.120780 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-qbm9l" Jan 27 12:52:09 crc kubenswrapper[4900]: I0127 12:52:09.120829 4900 scope.go:117] "RemoveContainer" containerID="e657aeacbe8e5e63fbbf129c3ed6c2019d7448ad183596ce5fe4a231cfa6538c" Jan 27 12:52:09 crc kubenswrapper[4900]: I0127 12:52:09.154472 4900 scope.go:117] "RemoveContainer" containerID="d426933f4f20c727c89c158c0835711132a2af9aa079afd56f846cb5683aa071" Jan 27 12:52:09 crc kubenswrapper[4900]: I0127 12:52:09.158731 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-qbm9l"] Jan 27 12:52:09 crc kubenswrapper[4900]: I0127 12:52:09.171884 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-qbm9l"] Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.131625 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lkpt6" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" containerName="registry-server" containerID="cri-o://759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16" gracePeriod=2 Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.371963 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.498601 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47942ee1-102a-42b0-a25a-e41b9a020d95" path="/var/lib/kubelet/pods/47942ee1-102a-42b0-a25a-e41b9a020d95/volumes" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.746552 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.832511 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5p59\" (UniqueName: \"kubernetes.io/projected/a68b9744-48fb-4c32-ac22-7506057a0496-kube-api-access-q5p59\") pod \"a68b9744-48fb-4c32-ac22-7506057a0496\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.833271 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-catalog-content\") pod \"a68b9744-48fb-4c32-ac22-7506057a0496\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.833405 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-utilities\") pod \"a68b9744-48fb-4c32-ac22-7506057a0496\" (UID: \"a68b9744-48fb-4c32-ac22-7506057a0496\") " Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.838654 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-utilities" (OuterVolumeSpecName: "utilities") pod "a68b9744-48fb-4c32-ac22-7506057a0496" (UID: "a68b9744-48fb-4c32-ac22-7506057a0496"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.840136 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.856962 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a68b9744-48fb-4c32-ac22-7506057a0496-kube-api-access-q5p59" (OuterVolumeSpecName: "kube-api-access-q5p59") pod "a68b9744-48fb-4c32-ac22-7506057a0496" (UID: "a68b9744-48fb-4c32-ac22-7506057a0496"). InnerVolumeSpecName "kube-api-access-q5p59". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.863874 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a68b9744-48fb-4c32-ac22-7506057a0496" (UID: "a68b9744-48fb-4c32-ac22-7506057a0496"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.874252 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-cdc8bdcbf-4ltg2" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.943560 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5p59\" (UniqueName: \"kubernetes.io/projected/a68b9744-48fb-4c32-ac22-7506057a0496-kube-api-access-q5p59\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:10 crc kubenswrapper[4900]: I0127 12:52:10.943609 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a68b9744-48fb-4c32-ac22-7506057a0496-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.199939 4900 generic.go:334] "Generic (PLEG): container finished" podID="a68b9744-48fb-4c32-ac22-7506057a0496" containerID="759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16" exitCode=0 Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.200047 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkpt6" event={"ID":"a68b9744-48fb-4c32-ac22-7506057a0496","Type":"ContainerDied","Data":"759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16"} Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.200106 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkpt6" event={"ID":"a68b9744-48fb-4c32-ac22-7506057a0496","Type":"ContainerDied","Data":"6e83f09792263a33a7268e419250026f7bf44f71b4f6c55a5e0d1a85c27c38da"} Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.200140 4900 scope.go:117] "RemoveContainer" containerID="759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.200280 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lkpt6" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.209125 4900 generic.go:334] "Generic (PLEG): container finished" podID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerID="ecfb9c3fbd100145b02f69dd08f9641b6030095c4ff2be3fe213ccce8741af35" exitCode=0 Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.209288 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6dd52811-14b2-4c08-99ac-a3018d0ae6de","Type":"ContainerDied","Data":"ecfb9c3fbd100145b02f69dd08f9641b6030095c4ff2be3fe213ccce8741af35"} Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.263810 4900 scope.go:117] "RemoveContainer" containerID="514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.272730 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkpt6"] Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.287067 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkpt6"] Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.312835 4900 scope.go:117] "RemoveContainer" containerID="bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.426701 4900 scope.go:117] "RemoveContainer" containerID="759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16" Jan 27 12:52:11 crc kubenswrapper[4900]: E0127 12:52:11.428942 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16\": container with ID starting with 759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16 not found: ID does not exist" containerID="759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.429103 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16"} err="failed to get container status \"759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16\": rpc error: code = NotFound desc = could not find container \"759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16\": container with ID starting with 759ed359f3cfec839ccb18ab276479214cba2c6ff3cef97def118ceb7e3dfc16 not found: ID does not exist" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.429249 4900 scope.go:117] "RemoveContainer" containerID="514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50" Jan 27 12:52:11 crc kubenswrapper[4900]: E0127 12:52:11.429972 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50\": container with ID starting with 514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50 not found: ID does not exist" containerID="514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.430022 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50"} err="failed to get container status 
\"514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50\": rpc error: code = NotFound desc = could not find container \"514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50\": container with ID starting with 514f997868c3a1a7a054705582cd990224506b100c580de289e219a7c34b1f50 not found: ID does not exist" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.430108 4900 scope.go:117] "RemoveContainer" containerID="bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845" Jan 27 12:52:11 crc kubenswrapper[4900]: E0127 12:52:11.431325 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845\": container with ID starting with bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845 not found: ID does not exist" containerID="bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.431385 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845"} err="failed to get container status \"bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845\": rpc error: code = NotFound desc = could not find container \"bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845\": container with ID starting with bf0a07e92b2f31c72c44954e8154ee248011daa6a7d4f587b0c6f7bb9ccd7845 not found: ID does not exist" Jan 27 12:52:11 crc kubenswrapper[4900]: E0127 12:52:11.520629 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda68b9744_48fb_4c32_ac22_7506057a0496.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda68b9744_48fb_4c32_ac22_7506057a0496.slice/crio-6e83f09792263a33a7268e419250026f7bf44f71b4f6c55a5e0d1a85c27c38da\": RecentStats: unable to find data in memory cache]" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.600920 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.676231 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data-custom\") pod \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.676408 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4wp9\" (UniqueName: \"kubernetes.io/projected/6dd52811-14b2-4c08-99ac-a3018d0ae6de-kube-api-access-h4wp9\") pod \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.676443 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-scripts\") pod \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.676532 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-combined-ca-bundle\") pod \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.676675 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6dd52811-14b2-4c08-99ac-a3018d0ae6de-etc-machine-id\") pod \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.676905 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data\") pod \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\" (UID: \"6dd52811-14b2-4c08-99ac-a3018d0ae6de\") " Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.677977 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6dd52811-14b2-4c08-99ac-a3018d0ae6de-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6dd52811-14b2-4c08-99ac-a3018d0ae6de" (UID: "6dd52811-14b2-4c08-99ac-a3018d0ae6de"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.683778 4900 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6dd52811-14b2-4c08-99ac-a3018d0ae6de-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.693816 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dd52811-14b2-4c08-99ac-a3018d0ae6de-kube-api-access-h4wp9" (OuterVolumeSpecName: "kube-api-access-h4wp9") pod "6dd52811-14b2-4c08-99ac-a3018d0ae6de" (UID: "6dd52811-14b2-4c08-99ac-a3018d0ae6de"). InnerVolumeSpecName "kube-api-access-h4wp9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.703276 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6dd52811-14b2-4c08-99ac-a3018d0ae6de" (UID: "6dd52811-14b2-4c08-99ac-a3018d0ae6de"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.709131 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-scripts" (OuterVolumeSpecName: "scripts") pod "6dd52811-14b2-4c08-99ac-a3018d0ae6de" (UID: "6dd52811-14b2-4c08-99ac-a3018d0ae6de"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.783104 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6dd52811-14b2-4c08-99ac-a3018d0ae6de" (UID: "6dd52811-14b2-4c08-99ac-a3018d0ae6de"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.786740 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.786777 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4wp9\" (UniqueName: \"kubernetes.io/projected/6dd52811-14b2-4c08-99ac-a3018d0ae6de-kube-api-access-h4wp9\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.786792 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.786801 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.823917 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data" (OuterVolumeSpecName: "config-data") pod "6dd52811-14b2-4c08-99ac-a3018d0ae6de" (UID: "6dd52811-14b2-4c08-99ac-a3018d0ae6de"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:11 crc kubenswrapper[4900]: I0127 12:52:11.889520 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6dd52811-14b2-4c08-99ac-a3018d0ae6de-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.231415 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6dd52811-14b2-4c08-99ac-a3018d0ae6de","Type":"ContainerDied","Data":"0e5ba15cda338ebbe7e9c061eeabe70d573815862da1ac5057f6a973dadd1362"} Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.231488 4900 scope.go:117] "RemoveContainer" containerID="1adcb6eef9bcf9ac83993f9a522c6199ab89db4b69bd95881470ec36c7cc2e1c" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.232071 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.260167 4900 scope.go:117] "RemoveContainer" containerID="ecfb9c3fbd100145b02f69dd08f9641b6030095c4ff2be3fe213ccce8741af35" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.279966 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.320695 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.363304 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:52:12 crc kubenswrapper[4900]: E0127 12:52:12.364351 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47942ee1-102a-42b0-a25a-e41b9a020d95" containerName="init" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364381 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="47942ee1-102a-42b0-a25a-e41b9a020d95" containerName="init" Jan 27 12:52:12 crc kubenswrapper[4900]: E0127 12:52:12.364443 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" containerName="extract-content" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364450 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" containerName="extract-content" Jan 27 12:52:12 crc kubenswrapper[4900]: E0127 12:52:12.364467 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerName="cinder-scheduler" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364474 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerName="cinder-scheduler" Jan 27 12:52:12 crc kubenswrapper[4900]: E0127 12:52:12.364491 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47942ee1-102a-42b0-a25a-e41b9a020d95" containerName="dnsmasq-dns" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364499 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="47942ee1-102a-42b0-a25a-e41b9a020d95" containerName="dnsmasq-dns" Jan 27 12:52:12 crc kubenswrapper[4900]: E0127 12:52:12.364521 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" containerName="registry-server" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364533 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" 
containerName="registry-server" Jan 27 12:52:12 crc kubenswrapper[4900]: E0127 12:52:12.364541 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerName="probe" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364547 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerName="probe" Jan 27 12:52:12 crc kubenswrapper[4900]: E0127 12:52:12.364564 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" containerName="extract-utilities" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364571 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" containerName="extract-utilities" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364831 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" containerName="registry-server" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364858 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerName="cinder-scheduler" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364875 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="47942ee1-102a-42b0-a25a-e41b9a020d95" containerName="dnsmasq-dns" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.364882 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" containerName="probe" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.366995 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.372643 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.381947 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.495225 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dd52811-14b2-4c08-99ac-a3018d0ae6de" path="/var/lib/kubelet/pods/6dd52811-14b2-4c08-99ac-a3018d0ae6de/volumes" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.496089 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a68b9744-48fb-4c32-ac22-7506057a0496" path="/var/lib/kubelet/pods/a68b9744-48fb-4c32-ac22-7506057a0496/volumes" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.509144 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9zwh\" (UniqueName: \"kubernetes.io/projected/6bf50105-b28f-4123-a6f8-75124e213fcc-kube-api-access-d9zwh\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.509212 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-scripts\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.509385 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.509559 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-config-data\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.509652 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bf50105-b28f-4123-a6f8-75124e213fcc-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.509715 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.613555 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-config-data\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.613674 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bf50105-b28f-4123-a6f8-75124e213fcc-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.613738 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.613863 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9zwh\" (UniqueName: \"kubernetes.io/projected/6bf50105-b28f-4123-a6f8-75124e213fcc-kube-api-access-d9zwh\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.613908 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-scripts\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.613905 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6bf50105-b28f-4123-a6f8-75124e213fcc-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " 
pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.614223 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.621011 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.621885 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-config-data\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.622758 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-scripts\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.625638 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bf50105-b28f-4123-a6f8-75124e213fcc-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.644115 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9zwh\" (UniqueName: \"kubernetes.io/projected/6bf50105-b28f-4123-a6f8-75124e213fcc-kube-api-access-d9zwh\") pod \"cinder-scheduler-0\" (UID: \"6bf50105-b28f-4123-a6f8-75124e213fcc\") " pod="openstack/cinder-scheduler-0" Jan 27 12:52:12 crc kubenswrapper[4900]: I0127 12:52:12.688414 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 27 12:52:13 crc kubenswrapper[4900]: I0127 12:52:13.228362 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 27 12:52:13 crc kubenswrapper[4900]: I0127 12:52:13.256081 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6bf50105-b28f-4123-a6f8-75124e213fcc","Type":"ContainerStarted","Data":"3f5cb5a64637168ae843ae1d47b6b54daf17a5034df68bde32893c2180075774"} Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.279699 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6bf50105-b28f-4123-a6f8-75124e213fcc","Type":"ContainerStarted","Data":"af3c570545cf826144035311b08eba9b878bb342fee4e73f72daffd5d60b8907"} Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.814253 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.816371 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.818999 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.819791 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.822312 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-79j8x" Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.843312 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.984564 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-openstack-config\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.984680 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6jtl\" (UniqueName: \"kubernetes.io/projected/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-kube-api-access-f6jtl\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.985006 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-openstack-config-secret\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:14 crc kubenswrapper[4900]: I0127 12:52:14.985310 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.087348 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-openstack-config-secret\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.087455 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.087611 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-openstack-config\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.087636 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-f6jtl\" (UniqueName: \"kubernetes.io/projected/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-kube-api-access-f6jtl\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.089395 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-openstack-config\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.094599 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-combined-ca-bundle\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.101691 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-openstack-config-secret\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.126884 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6jtl\" (UniqueName: \"kubernetes.io/projected/ded5eaa8-8d7d-4ee2-bad6-62da18024e33-kube-api-access-f6jtl\") pod \"openstackclient\" (UID: \"ded5eaa8-8d7d-4ee2-bad6-62da18024e33\") " pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.158795 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.304539 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6bf50105-b28f-4123-a6f8-75124e213fcc","Type":"ContainerStarted","Data":"dffa7c229f15b24b6f07b4e9ae15282572b313e6a7a26b317316e643acaae9cf"} Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.371435 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.371395443 podStartE2EDuration="3.371395443s" podCreationTimestamp="2026-01-27 12:52:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:15.325710331 +0000 UTC m=+1562.562738541" watchObservedRunningTime="2026-01-27 12:52:15.371395443 +0000 UTC m=+1562.608423653" Jan 27 12:52:15 crc kubenswrapper[4900]: I0127 12:52:15.836866 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 27 12:52:16 crc kubenswrapper[4900]: I0127 12:52:16.321633 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"ded5eaa8-8d7d-4ee2-bad6-62da18024e33","Type":"ContainerStarted","Data":"3114f6e851972d82d54d086dd3beda0ed3d901d452ed2b30689caa9bc64e7d55"} Jan 27 12:52:17 crc kubenswrapper[4900]: I0127 12:52:17.689590 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.088572 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nqj4r"] Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.092279 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.105944 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nqj4r"] Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.190801 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c9gt\" (UniqueName: \"kubernetes.io/projected/78573fe2-69fe-42f2-9196-304828739af1-kube-api-access-2c9gt\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.190973 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-utilities\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.191072 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-catalog-content\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.293313 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c9gt\" (UniqueName: \"kubernetes.io/projected/78573fe2-69fe-42f2-9196-304828739af1-kube-api-access-2c9gt\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.293408 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-utilities\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.293476 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-catalog-content\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.294263 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-catalog-content\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.294902 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-utilities\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.321870 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2c9gt\" (UniqueName: \"kubernetes.io/projected/78573fe2-69fe-42f2-9196-304828739af1-kube-api-access-2c9gt\") pod \"community-operators-nqj4r\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") " pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.403589 4900 generic.go:334] "Generic (PLEG): container finished" podID="a644fee2-5031-470c-a287-34a8963d86c7" containerID="5a947b4b0098e3c9a2e3a190832f11ce33e05671f72352a8f7046c772ac88153" exitCode=137 Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.403763 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" event={"ID":"a644fee2-5031-470c-a287-34a8963d86c7","Type":"ContainerDied","Data":"5a947b4b0098e3c9a2e3a190832f11ce33e05671f72352a8f7046c772ac88153"} Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.429766 4900 generic.go:334] "Generic (PLEG): container finished" podID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerID="1e5fde54d05e0651d0427012836c57a018b958468e6b63d0de2ee6027e36a9d6" exitCode=137 Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.429817 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5657957b69-ncpk6" event={"ID":"4c216bce-8265-4cdb-8104-6267c1196cc2","Type":"ContainerDied","Data":"1e5fde54d05e0651d0427012836c57a018b958468e6b63d0de2ee6027e36a9d6"} Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.449422 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.754554 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.762112 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.817510 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data\") pod \"4c216bce-8265-4cdb-8104-6267c1196cc2\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.817604 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjj5f\" (UniqueName: \"kubernetes.io/projected/4c216bce-8265-4cdb-8104-6267c1196cc2-kube-api-access-wjj5f\") pod \"4c216bce-8265-4cdb-8104-6267c1196cc2\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.817651 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-combined-ca-bundle\") pod \"4c216bce-8265-4cdb-8104-6267c1196cc2\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.817698 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c216bce-8265-4cdb-8104-6267c1196cc2-logs\") pod \"4c216bce-8265-4cdb-8104-6267c1196cc2\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.817805 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data-custom\") pod \"4c216bce-8265-4cdb-8104-6267c1196cc2\" (UID: \"4c216bce-8265-4cdb-8104-6267c1196cc2\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.824722 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4c216bce-8265-4cdb-8104-6267c1196cc2" (UID: "4c216bce-8265-4cdb-8104-6267c1196cc2"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.825120 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c216bce-8265-4cdb-8104-6267c1196cc2-logs" (OuterVolumeSpecName: "logs") pod "4c216bce-8265-4cdb-8104-6267c1196cc2" (UID: "4c216bce-8265-4cdb-8104-6267c1196cc2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.825143 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c216bce-8265-4cdb-8104-6267c1196cc2-kube-api-access-wjj5f" (OuterVolumeSpecName: "kube-api-access-wjj5f") pod "4c216bce-8265-4cdb-8104-6267c1196cc2" (UID: "4c216bce-8265-4cdb-8104-6267c1196cc2"). InnerVolumeSpecName "kube-api-access-wjj5f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.920217 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data-custom\") pod \"a644fee2-5031-470c-a287-34a8963d86c7\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.920309 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data\") pod \"a644fee2-5031-470c-a287-34a8963d86c7\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.920387 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-combined-ca-bundle\") pod \"a644fee2-5031-470c-a287-34a8963d86c7\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.920438 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a644fee2-5031-470c-a287-34a8963d86c7-logs\") pod \"a644fee2-5031-470c-a287-34a8963d86c7\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.920532 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82qvb\" (UniqueName: \"kubernetes.io/projected/a644fee2-5031-470c-a287-34a8963d86c7-kube-api-access-82qvb\") pod \"a644fee2-5031-470c-a287-34a8963d86c7\" (UID: \"a644fee2-5031-470c-a287-34a8963d86c7\") " Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.921041 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjj5f\" (UniqueName: \"kubernetes.io/projected/4c216bce-8265-4cdb-8104-6267c1196cc2-kube-api-access-wjj5f\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.921071 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4c216bce-8265-4cdb-8104-6267c1196cc2-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.921081 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.939377 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a644fee2-5031-470c-a287-34a8963d86c7-logs" (OuterVolumeSpecName: "logs") pod "a644fee2-5031-470c-a287-34a8963d86c7" (UID: "a644fee2-5031-470c-a287-34a8963d86c7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.956406 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a644fee2-5031-470c-a287-34a8963d86c7-kube-api-access-82qvb" (OuterVolumeSpecName: "kube-api-access-82qvb") pod "a644fee2-5031-470c-a287-34a8963d86c7" (UID: "a644fee2-5031-470c-a287-34a8963d86c7"). InnerVolumeSpecName "kube-api-access-82qvb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:18 crc kubenswrapper[4900]: I0127 12:52:18.956514 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a644fee2-5031-470c-a287-34a8963d86c7" (UID: "a644fee2-5031-470c-a287-34a8963d86c7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:18.998782 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c216bce-8265-4cdb-8104-6267c1196cc2" (UID: "4c216bce-8265-4cdb-8104-6267c1196cc2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.047516 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a644fee2-5031-470c-a287-34a8963d86c7-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.047574 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.047587 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82qvb\" (UniqueName: \"kubernetes.io/projected/a644fee2-5031-470c-a287-34a8963d86c7-kube-api-access-82qvb\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.047598 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.058357 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data" (OuterVolumeSpecName: "config-data") pod "4c216bce-8265-4cdb-8104-6267c1196cc2" (UID: "4c216bce-8265-4cdb-8104-6267c1196cc2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.098723 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a644fee2-5031-470c-a287-34a8963d86c7" (UID: "a644fee2-5031-470c-a287-34a8963d86c7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.144728 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-78cfbd797c-pm2mm"] Jan 27 12:52:19 crc kubenswrapper[4900]: E0127 12:52:19.145451 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerName="barbican-worker-log" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.145467 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerName="barbican-worker-log" Jan 27 12:52:19 crc kubenswrapper[4900]: E0127 12:52:19.145485 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a644fee2-5031-470c-a287-34a8963d86c7" containerName="barbican-keystone-listener" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.145493 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a644fee2-5031-470c-a287-34a8963d86c7" containerName="barbican-keystone-listener" Jan 27 12:52:19 crc kubenswrapper[4900]: E0127 12:52:19.145520 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerName="barbican-worker" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.145528 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerName="barbican-worker" Jan 27 12:52:19 crc kubenswrapper[4900]: E0127 12:52:19.145542 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a644fee2-5031-470c-a287-34a8963d86c7" containerName="barbican-keystone-listener-log" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.145548 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a644fee2-5031-470c-a287-34a8963d86c7" containerName="barbican-keystone-listener-log" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.146035 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerName="barbican-worker" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.146072 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a644fee2-5031-470c-a287-34a8963d86c7" containerName="barbican-keystone-listener" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.146095 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a644fee2-5031-470c-a287-34a8963d86c7" containerName="barbican-keystone-listener-log" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.146125 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" containerName="barbican-worker-log" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.148204 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.150636 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.150661 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c216bce-8265-4cdb-8104-6267c1196cc2-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.164736 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.165142 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-gwwtc" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.173673 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.227932 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-78cfbd797c-pm2mm"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.256507 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data-custom\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.256854 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.265295 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcm8g\" (UniqueName: \"kubernetes.io/projected/c23e8577-70e1-4e21-a841-6c34251756f7-kube-api-access-vcm8g\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.265529 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-combined-ca-bundle\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.292898 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data" (OuterVolumeSpecName: "config-data") pod "a644fee2-5031-470c-a287-34a8963d86c7" (UID: "a644fee2-5031-470c-a287-34a8963d86c7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.369158 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcm8g\" (UniqueName: \"kubernetes.io/projected/c23e8577-70e1-4e21-a841-6c34251756f7-kube-api-access-vcm8g\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.372455 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-combined-ca-bundle\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.372741 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data-custom\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.373028 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.373249 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a644fee2-5031-470c-a287-34a8963d86c7-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.374253 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nqj4r"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.393381 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.395816 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data-custom\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.396204 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-combined-ca-bundle\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.405684 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcm8g\" (UniqueName: \"kubernetes.io/projected/c23e8577-70e1-4e21-a841-6c34251756f7-kube-api-access-vcm8g\") pod \"heat-engine-78cfbd797c-pm2mm\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " 
pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.424160 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-nlfhl"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.427341 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.552627 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-758b8d686f-mcf4h"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.556444 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.573848 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.576852 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.611624 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-nlfhl"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.642638 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-758b8d686f-mcf4h"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.678312 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5657957b69-ncpk6" event={"ID":"4c216bce-8265-4cdb-8104-6267c1196cc2","Type":"ContainerDied","Data":"093fa79bcd6f44b8a67cd3113d871d429307d75c8c97c9b12837fdbae88949c3"} Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.678383 4900 scope.go:117] "RemoveContainer" containerID="1e5fde54d05e0651d0427012836c57a018b958468e6b63d0de2ee6027e36a9d6" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.678580 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5657957b69-ncpk6" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.705529 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nqj4r" event={"ID":"78573fe2-69fe-42f2-9196-304828739af1","Type":"ContainerStarted","Data":"dee8f4fc940b93d6acabd29df0ae07004aa7e3ba87a1e06f3a7a6d3f1fefe0ca"} Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.726989 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.727193 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-config\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.727463 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.727504 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-combined-ca-bundle\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.727556 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data-custom\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.727616 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl2kt\" (UniqueName: \"kubernetes.io/projected/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-kube-api-access-vl2kt\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.727708 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6kd4\" (UniqueName: \"kubernetes.io/projected/d305c1d2-157a-4980-8c1a-f02acd4bc99e-kube-api-access-l6kd4\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.727742 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-swift-storage-0\") 
pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.727791 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.728179 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.731693 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-689d5949ff-jt4qp"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.754783 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.755108 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-59b569d4f4-bkjxc" event={"ID":"a644fee2-5031-470c-a287-34a8963d86c7","Type":"ContainerDied","Data":"2334fb0a8a93290269c8f8d4127e11b7a23b041e00afc292d6a4ef5b379574f6"} Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.755318 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.765551 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.768271 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-689d5949ff-jt4qp"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.799446 4900 scope.go:117] "RemoveContainer" containerID="8c43da896c9f4193b0949bf2d40b57eec81f0be251163c55686a409d54e75844" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.842974 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.844657 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-combined-ca-bundle\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.844732 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data-custom\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.844814 4900 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl2kt\" (UniqueName: \"kubernetes.io/projected/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-kube-api-access-vl2kt\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.844918 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6kd4\" (UniqueName: \"kubernetes.io/projected/d305c1d2-157a-4980-8c1a-f02acd4bc99e-kube-api-access-l6kd4\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.844935 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.844976 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.846618 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.846759 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfsm7\" (UniqueName: \"kubernetes.io/projected/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-kube-api-access-nfsm7\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.846884 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data-custom\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.846976 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.847074 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.847369 
4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-config\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.847604 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-combined-ca-bundle\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.850027 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.850919 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.852135 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.852490 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.855802 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data-custom\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.860771 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-combined-ca-bundle\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.867638 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-config\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.878101 4900 scope.go:117] "RemoveContainer" containerID="5a947b4b0098e3c9a2e3a190832f11ce33e05671f72352a8f7046c772ac88153" Jan 27 
12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.887369 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.911996 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6kd4\" (UniqueName: \"kubernetes.io/projected/d305c1d2-157a-4980-8c1a-f02acd4bc99e-kube-api-access-l6kd4\") pod \"heat-cfnapi-758b8d686f-mcf4h\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.917849 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl2kt\" (UniqueName: \"kubernetes.io/projected/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-kube-api-access-vl2kt\") pod \"dnsmasq-dns-f6bc4c6c9-nlfhl\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.918695 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-5657957b69-ncpk6"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.920903 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.934753 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-5657957b69-ncpk6"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.951190 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.951266 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfsm7\" (UniqueName: \"kubernetes.io/projected/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-kube-api-access-nfsm7\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.951327 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data-custom\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.952010 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-combined-ca-bundle\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.958228 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-combined-ca-bundle\") pod \"heat-api-689d5949ff-jt4qp\" (UID: 
\"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.961135 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.969756 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data-custom\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.986482 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-59b569d4f4-bkjxc"] Jan 27 12:52:19 crc kubenswrapper[4900]: I0127 12:52:19.988384 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfsm7\" (UniqueName: \"kubernetes.io/projected/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-kube-api-access-nfsm7\") pod \"heat-api-689d5949ff-jt4qp\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.043678 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-59b569d4f4-bkjxc"] Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.119270 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.154912 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.356919 4900 scope.go:117] "RemoveContainer" containerID="07095d5ca62ecb4330fb45031bd07ea2698b438ba2ee91ee52daf2922b4eff50" Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.441495 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-78cfbd797c-pm2mm"] Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.566689 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c216bce-8265-4cdb-8104-6267c1196cc2" path="/var/lib/kubelet/pods/4c216bce-8265-4cdb-8104-6267c1196cc2/volumes" Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.570129 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a644fee2-5031-470c-a287-34a8963d86c7" path="/var/lib/kubelet/pods/a644fee2-5031-470c-a287-34a8963d86c7/volumes" Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.797094 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-758b8d686f-mcf4h"] Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.817818 4900 generic.go:334] "Generic (PLEG): container finished" podID="78573fe2-69fe-42f2-9196-304828739af1" containerID="bc3f1d0846cf21c742949349e9446c182df389f47851790609f674e1605c3bcb" exitCode=0 Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.818400 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nqj4r" event={"ID":"78573fe2-69fe-42f2-9196-304828739af1","Type":"ContainerDied","Data":"bc3f1d0846cf21c742949349e9446c182df389f47851790609f674e1605c3bcb"} Jan 27 12:52:20 crc kubenswrapper[4900]: I0127 12:52:20.850014 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78cfbd797c-pm2mm" event={"ID":"c23e8577-70e1-4e21-a841-6c34251756f7","Type":"ContainerStarted","Data":"7d096827b0b4bab5e06039d470d7968bb32506e256adb95e5d8c95ed49fcd8d9"} Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.061092 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-689d5949ff-jt4qp"] Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.217910 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-nlfhl"] Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.940160 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" event={"ID":"d305c1d2-157a-4980-8c1a-f02acd4bc99e","Type":"ContainerStarted","Data":"7f5cf233d589dd93ec093d5a1afa4917be7047c7999418ececcf6664574d051e"} Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.949972 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nqj4r" event={"ID":"78573fe2-69fe-42f2-9196-304828739af1","Type":"ContainerStarted","Data":"a1a91d635ddacbe8262aae3cbffbdc769453c520a7cedf73ab7fd5040ace1954"} Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.964485 4900 generic.go:334] "Generic (PLEG): container finished" podID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" containerID="e4b61be158c5837607b67cb0c510d24fb7c5d0f4e84cf1ab162815edcebc8546" exitCode=0 Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.967731 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" event={"ID":"e3964262-2f6d-40bd-8c37-6cf8ccf1312a","Type":"ContainerDied","Data":"e4b61be158c5837607b67cb0c510d24fb7c5d0f4e84cf1ab162815edcebc8546"} Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 
12:52:21.967813 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" event={"ID":"e3964262-2f6d-40bd-8c37-6cf8ccf1312a","Type":"ContainerStarted","Data":"c0d75bf1733a77aa847357bf3ab5a213fa48395a4f2086192f3a8da4582811a1"} Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.976948 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-689d5949ff-jt4qp" event={"ID":"a01e3768-681f-4cd6-8614-ecc16a8f5b0e","Type":"ContainerStarted","Data":"85d6c36ff26f350bc361ade16022732a53716a487845c617c6b36a77ad9a1369"} Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.991472 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78cfbd797c-pm2mm" event={"ID":"c23e8577-70e1-4e21-a841-6c34251756f7","Type":"ContainerStarted","Data":"2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017"} Jan 27 12:52:21 crc kubenswrapper[4900]: I0127 12:52:21.992788 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:22 crc kubenswrapper[4900]: E0127 12:52:22.177695 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3964262_2f6d_40bd_8c37_6cf8ccf1312a.slice/crio-conmon-e4b61be158c5837607b67cb0c510d24fb7c5d0f4e84cf1ab162815edcebc8546.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3964262_2f6d_40bd_8c37_6cf8ccf1312a.slice/crio-e4b61be158c5837607b67cb0c510d24fb7c5d0f4e84cf1ab162815edcebc8546.scope\": RecentStats: unable to find data in memory cache]" Jan 27 12:52:22 crc kubenswrapper[4900]: I0127 12:52:22.201981 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-78cfbd797c-pm2mm" podStartSLOduration=3.201944823 podStartE2EDuration="3.201944823s" podCreationTimestamp="2026-01-27 12:52:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:22.071334845 +0000 UTC m=+1569.308363085" watchObservedRunningTime="2026-01-27 12:52:22.201944823 +0000 UTC m=+1569.438973033" Jan 27 12:52:22 crc kubenswrapper[4900]: I0127 12:52:22.372435 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 12:52:22 crc kubenswrapper[4900]: I0127 12:52:22.372784 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 12:52:23 crc kubenswrapper[4900]: I0127 12:52:23.096679 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" event={"ID":"e3964262-2f6d-40bd-8c37-6cf8ccf1312a","Type":"ContainerStarted","Data":"8c1d9e3045ec4e5afd8ab3888e93dbc673e13d1a7754e872483add308efc6c44"} Jan 27 12:52:23 crc kubenswrapper[4900]: I0127 12:52:23.097388 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 
Jan 27 12:52:23 crc kubenswrapper[4900]: I0127 12:52:23.131722 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" podStartSLOduration=4.131697596 podStartE2EDuration="4.131697596s" podCreationTimestamp="2026-01-27 12:52:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:23.119709236 +0000 UTC m=+1570.356737466" watchObservedRunningTime="2026-01-27 12:52:23.131697596 +0000 UTC m=+1570.368725806"
Jan 27 12:52:23 crc kubenswrapper[4900]: I0127 12:52:23.627839 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.082812 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-54df577b4f-lsr4f"
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.167906 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5bf6b97d5b-nvggn"]
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.168180 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5bf6b97d5b-nvggn" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerName="neutron-api" containerID="cri-o://f0b725221906d9a65689350818bed6a5afc6d225bf2f300009ad498ceae59e57" gracePeriod=30
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.168933 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5bf6b97d5b-nvggn" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerName="neutron-httpd" containerID="cri-o://c57a1f196de92f004e5836fa5b3e9a73328a8ac375d79a67ea6ae82c79b0b3e7" gracePeriod=30
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.197551 4900 generic.go:334] "Generic (PLEG): container finished" podID="78573fe2-69fe-42f2-9196-304828739af1" containerID="a1a91d635ddacbe8262aae3cbffbdc769453c520a7cedf73ab7fd5040ace1954" exitCode=0
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.198969 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nqj4r" event={"ID":"78573fe2-69fe-42f2-9196-304828739af1","Type":"ContainerDied","Data":"a1a91d635ddacbe8262aae3cbffbdc769453c520a7cedf73ab7fd5040ace1954"}
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.461850 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.462225 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="ceilometer-central-agent" containerID="cri-o://d18819e0069d49a28d9cec8272e82136f3b390380a58369a4f08d2d98da08f9a" gracePeriod=30
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.464735 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="proxy-httpd" containerID="cri-o://58bf39b05777fadec75f938969df1684c256309712db6ae44ef0d01e2a406b7e" gracePeriod=30
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.465027 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="ceilometer-notification-agent" containerID="cri-o://255c80b80399b7f9ab2b447d472f0a9004a1e964b6a080dff6a20e18fa89fdf0" gracePeriod=30
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.465115 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="sg-core" containerID="cri-o://e8efa6c66a1b88295c17777f8906520c4063eb3e6714590fae2fe2c1e524121c" gracePeriod=30
Jan 27 12:52:25 crc kubenswrapper[4900]: I0127 12:52:25.487807 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.210:3000/\": EOF"
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.233079 4900 generic.go:334] "Generic (PLEG): container finished" podID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerID="c57a1f196de92f004e5836fa5b3e9a73328a8ac375d79a67ea6ae82c79b0b3e7" exitCode=0
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.233180 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bf6b97d5b-nvggn" event={"ID":"a5e02e88-5851-4d7a-93fe-39c857d7f2de","Type":"ContainerDied","Data":"c57a1f196de92f004e5836fa5b3e9a73328a8ac375d79a67ea6ae82c79b0b3e7"}
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.236945 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-689d5949ff-jt4qp" event={"ID":"a01e3768-681f-4cd6-8614-ecc16a8f5b0e","Type":"ContainerStarted","Data":"3c2bb4855a6b6a583ceb15847108132eb4c62d810a0e93bc04a4d3948bb5f181"}
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.237079 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-689d5949ff-jt4qp"
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.244924 4900 generic.go:334] "Generic (PLEG): container finished" podID="33045450-fef6-4a24-a926-c91d19c2fe02" containerID="58bf39b05777fadec75f938969df1684c256309712db6ae44ef0d01e2a406b7e" exitCode=0
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.244961 4900 generic.go:334] "Generic (PLEG): container finished" podID="33045450-fef6-4a24-a926-c91d19c2fe02" containerID="e8efa6c66a1b88295c17777f8906520c4063eb3e6714590fae2fe2c1e524121c" exitCode=2
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.244972 4900 generic.go:334] "Generic (PLEG): container finished" podID="33045450-fef6-4a24-a926-c91d19c2fe02" containerID="d18819e0069d49a28d9cec8272e82136f3b390380a58369a4f08d2d98da08f9a" exitCode=0
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.245030 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerDied","Data":"58bf39b05777fadec75f938969df1684c256309712db6ae44ef0d01e2a406b7e"}
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.245115 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerDied","Data":"e8efa6c66a1b88295c17777f8906520c4063eb3e6714590fae2fe2c1e524121c"}
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.245130 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerDied","Data":"d18819e0069d49a28d9cec8272e82136f3b390380a58369a4f08d2d98da08f9a"}
Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.257353 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nqj4r" event={"ID":"78573fe2-69fe-42f2-9196-304828739af1","Type":"ContainerStarted","Data":"352260c4d422dfb0d2c99d63e79f78e1be4da041df4b8f73c51499f307e02863"}
event={"ID":"78573fe2-69fe-42f2-9196-304828739af1","Type":"ContainerStarted","Data":"352260c4d422dfb0d2c99d63e79f78e1be4da041df4b8f73c51499f307e02863"} Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.262153 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" event={"ID":"d305c1d2-157a-4980-8c1a-f02acd4bc99e","Type":"ContainerStarted","Data":"07bb0c84e7a749595bd0af9497fa65642ef87c5d12ed762128d431f30625e718"} Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.262492 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.271575 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-689d5949ff-jt4qp" podStartSLOduration=2.823299503 podStartE2EDuration="7.271549476s" podCreationTimestamp="2026-01-27 12:52:19 +0000 UTC" firstStartedPulling="2026-01-27 12:52:21.069703976 +0000 UTC m=+1568.306732186" lastFinishedPulling="2026-01-27 12:52:25.517953949 +0000 UTC m=+1572.754982159" observedRunningTime="2026-01-27 12:52:26.26858306 +0000 UTC m=+1573.505611290" watchObservedRunningTime="2026-01-27 12:52:26.271549476 +0000 UTC m=+1573.508577696" Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.295502 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" podStartSLOduration=2.627002411 podStartE2EDuration="7.295478314s" podCreationTimestamp="2026-01-27 12:52:19 +0000 UTC" firstStartedPulling="2026-01-27 12:52:20.849306311 +0000 UTC m=+1568.086334531" lastFinishedPulling="2026-01-27 12:52:25.517782234 +0000 UTC m=+1572.754810434" observedRunningTime="2026-01-27 12:52:26.290662104 +0000 UTC m=+1573.527690314" watchObservedRunningTime="2026-01-27 12:52:26.295478314 +0000 UTC m=+1573.532506524" Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.312658 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nqj4r" podStartSLOduration=3.198707207 podStartE2EDuration="8.312630464s" podCreationTimestamp="2026-01-27 12:52:18 +0000 UTC" firstStartedPulling="2026-01-27 12:52:20.821585083 +0000 UTC m=+1568.058613293" lastFinishedPulling="2026-01-27 12:52:25.93550834 +0000 UTC m=+1573.172536550" observedRunningTime="2026-01-27 12:52:26.308070571 +0000 UTC m=+1573.545098791" watchObservedRunningTime="2026-01-27 12:52:26.312630464 +0000 UTC m=+1573.549658684" Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.540193 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7cb9669c67-gp7qj"] Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.543250 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.545721 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.546435 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 27 12:52:26 crc kubenswrapper[4900]: I0127 12:52:26.548965 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.135572 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/668f25a4-c69e-4696-849d-166f82f28d00-log-httpd\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.135796 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/668f25a4-c69e-4696-849d-166f82f28d00-run-httpd\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.135827 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/668f25a4-c69e-4696-849d-166f82f28d00-etc-swift\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.135916 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-public-tls-certs\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.136039 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-combined-ca-bundle\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.136226 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl9kz\" (UniqueName: \"kubernetes.io/projected/668f25a4-c69e-4696-849d-166f82f28d00-kube-api-access-rl9kz\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.136300 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-internal-tls-certs\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.136357 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-config-data\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.143844 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7cb9669c67-gp7qj"] Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.241097 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl9kz\" (UniqueName: \"kubernetes.io/projected/668f25a4-c69e-4696-849d-166f82f28d00-kube-api-access-rl9kz\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.242126 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-internal-tls-certs\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.242258 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-config-data\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.242429 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/668f25a4-c69e-4696-849d-166f82f28d00-log-httpd\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.242649 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/668f25a4-c69e-4696-849d-166f82f28d00-run-httpd\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.242737 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/668f25a4-c69e-4696-849d-166f82f28d00-etc-swift\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.242868 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-public-tls-certs\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.243036 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-combined-ca-bundle\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " 
pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.243184 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/668f25a4-c69e-4696-849d-166f82f28d00-log-httpd\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.251054 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-public-tls-certs\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.252292 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-config-data\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.252526 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/668f25a4-c69e-4696-849d-166f82f28d00-run-httpd\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.252793 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-internal-tls-certs\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.253289 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/668f25a4-c69e-4696-849d-166f82f28d00-etc-swift\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.258884 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/668f25a4-c69e-4696-849d-166f82f28d00-combined-ca-bundle\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.269385 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl9kz\" (UniqueName: \"kubernetes.io/projected/668f25a4-c69e-4696-849d-166f82f28d00-kube-api-access-rl9kz\") pod \"swift-proxy-7cb9669c67-gp7qj\" (UID: \"668f25a4-c69e-4696-849d-166f82f28d00\") " pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.481241 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:27 crc kubenswrapper[4900]: I0127 12:52:27.507765 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.209:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.036478 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sq822"] Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.039376 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.067162 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sq822"] Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.080250 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2dcf\" (UniqueName: \"kubernetes.io/projected/157a4f73-69ff-48a5-a97b-e78693201660-kube-api-access-k2dcf\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.080742 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-utilities\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.081553 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-catalog-content\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.184904 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-utilities\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.185092 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-catalog-content\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.185151 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2dcf\" (UniqueName: \"kubernetes.io/projected/157a4f73-69ff-48a5-a97b-e78693201660-kube-api-access-k2dcf\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.185957 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-utilities\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.186222 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-catalog-content\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.238172 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2dcf\" (UniqueName: \"kubernetes.io/projected/157a4f73-69ff-48a5-a97b-e78693201660-kube-api-access-k2dcf\") pod \"certified-operators-sq822\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.280620 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7cb9669c67-gp7qj"] Jan 27 12:52:28 crc kubenswrapper[4900]: I0127 12:52:28.942351 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.019131 4900 generic.go:334] "Generic (PLEG): container finished" podID="33045450-fef6-4a24-a926-c91d19c2fe02" containerID="255c80b80399b7f9ab2b447d472f0a9004a1e964b6a080dff6a20e18fa89fdf0" exitCode=0 Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.037425 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.037474 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nqj4r" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.037486 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerDied","Data":"255c80b80399b7f9ab2b447d472f0a9004a1e964b6a080dff6a20e18fa89fdf0"} Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.186425 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-55dc67899d-kc667"] Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.188438 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.220697 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5b76ffc8f4-6qc4h"] Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.225847 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.260787 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhzqx\" (UniqueName: \"kubernetes.io/projected/eed3fb3f-18d6-4382-85f1-77235d870e91-kube-api-access-zhzqx\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.261094 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-combined-ca-bundle\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.261264 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.261352 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data-custom\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.263382 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-55dc67899d-kc667"] Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.308845 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7c966d6899-4tntc"] Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.311118 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.356646 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5b76ffc8f4-6qc4h"] Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367315 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-combined-ca-bundle\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367365 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data-custom\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367388 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data-custom\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367418 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367453 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data-custom\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367505 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367620 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-combined-ca-bundle\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367719 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trhxj\" (UniqueName: \"kubernetes.io/projected/33681235-9f03-4254-9658-5948d6a9db37-kube-api-access-trhxj\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367756 4900 
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367796 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhzqx\" (UniqueName: \"kubernetes.io/projected/eed3fb3f-18d6-4382-85f1-77235d870e91-kube-api-access-zhzqx\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667"
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367858 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-combined-ca-bundle\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667"
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.367952 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667"
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.377746 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data-custom\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667"
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.391995 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhzqx\" (UniqueName: \"kubernetes.io/projected/eed3fb3f-18d6-4382-85f1-77235d870e91-kube-api-access-zhzqx\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667"
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.400461 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667"
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.406757 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-combined-ca-bundle\") pod \"heat-engine-55dc67899d-kc667\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " pod="openstack/heat-engine-55dc67899d-kc667"
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.421462 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7c966d6899-4tntc"]
Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.475628 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trhxj\" (UniqueName: \"kubernetes.io/projected/33681235-9f03-4254-9658-5948d6a9db37-kube-api-access-trhxj\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h"
\"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.475711 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brb4f\" (UniqueName: \"kubernetes.io/projected/39372723-3fe3-4691-b1ed-16d971262f96-kube-api-access-brb4f\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.475876 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-combined-ca-bundle\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.475901 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data-custom\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.475935 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.475983 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data-custom\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.476042 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.476122 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-combined-ca-bundle\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.484042 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.485231 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-combined-ca-bundle\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " 
pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.496416 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data-custom\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.496502 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data-custom\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.500140 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brb4f\" (UniqueName: \"kubernetes.io/projected/39372723-3fe3-4691-b1ed-16d971262f96-kube-api-access-brb4f\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.500876 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-combined-ca-bundle\") pod \"heat-cfnapi-7c966d6899-4tntc\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.502495 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trhxj\" (UniqueName: \"kubernetes.io/projected/33681235-9f03-4254-9658-5948d6a9db37-kube-api-access-trhxj\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.502840 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data\") pod \"heat-api-5b76ffc8f4-6qc4h\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.520408 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.568228 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:29 crc kubenswrapper[4900]: I0127 12:52:29.654555 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:30 crc kubenswrapper[4900]: I0127 12:52:30.359422 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nqj4r" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="registry-server" probeResult="failure" output=< Jan 27 12:52:30 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:52:30 crc kubenswrapper[4900]: > Jan 27 12:52:30 crc kubenswrapper[4900]: I0127 12:52:30.364282 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:52:30 crc kubenswrapper[4900]: I0127 12:52:30.599185 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-89nlm"] Jan 27 12:52:30 crc kubenswrapper[4900]: I0127 12:52:30.599438 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerName="dnsmasq-dns" containerID="cri-o://d38f55dfc8e94aa9f8e511c1d2ea8c1cff244e9f2614345e5eeb1ebad0610637" gracePeriod=10 Jan 27 12:52:31 crc kubenswrapper[4900]: I0127 12:52:31.414658 4900 generic.go:334] "Generic (PLEG): container finished" podID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerID="d38f55dfc8e94aa9f8e511c1d2ea8c1cff244e9f2614345e5eeb1ebad0610637" exitCode=0 Jan 27 12:52:31 crc kubenswrapper[4900]: I0127 12:52:31.414910 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" event={"ID":"67de2b7f-64d0-4a97-b742-18fc0abb827c","Type":"ContainerDied","Data":"d38f55dfc8e94aa9f8e511c1d2ea8c1cff244e9f2614345e5eeb1ebad0610637"} Jan 27 12:52:31 crc kubenswrapper[4900]: I0127 12:52:31.618205 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.210:3000/\": dial tcp 10.217.0.210:3000: connect: connection refused" Jan 27 12:52:32 crc kubenswrapper[4900]: I0127 12:52:32.435612 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.208:5353: connect: connection refused" Jan 27 12:52:32 crc kubenswrapper[4900]: I0127 12:52:32.461402 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.209:8776/healthcheck\": dial tcp 10.217.0.209:8776: connect: connection refused" Jan 27 12:52:32 crc kubenswrapper[4900]: I0127 12:52:32.854575 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-689d5949ff-jt4qp"] Jan 27 12:52:32 crc kubenswrapper[4900]: I0127 12:52:32.854912 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-689d5949ff-jt4qp" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api" containerID="cri-o://3c2bb4855a6b6a583ceb15847108132eb4c62d810a0e93bc04a4d3948bb5f181" gracePeriod=60 Jan 27 12:52:32 crc kubenswrapper[4900]: I0127 12:52:32.880018 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-758b8d686f-mcf4h"] Jan 27 12:52:32 crc kubenswrapper[4900]: I0127 12:52:32.880271 4900 
Jan 27 12:52:32 crc kubenswrapper[4900]: I0127 12:52:32.897094 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-api-689d5949ff-jt4qp" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.217:8004/healthcheck\": EOF"
Jan 27 12:52:32 crc kubenswrapper[4900]: I0127 12:52:32.897394 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-689d5949ff-jt4qp" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.217:8004/healthcheck\": EOF"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.033433 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-799b6c7dd6-cz9kf"]
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.036025 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.045351 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.045607 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.074022 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-799b6c7dd6-cz9kf"]
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.145168 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-6f85ff8cdd-9lj8l"]
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.148160 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6f85ff8cdd-9lj8l"
Need to start a new one" pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.153577 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.153852 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.200851 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6f85ff8cdd-9lj8l"] Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.215830 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85kmk\" (UniqueName: \"kubernetes.io/projected/d30b65d6-3363-496a-97c9-8983b5332464-kube-api-access-85kmk\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.215964 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data-custom\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.216022 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-combined-ca-bundle\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.216180 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-public-tls-certs\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.216422 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-internal-tls-certs\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.216503 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.336371 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85kmk\" (UniqueName: \"kubernetes.io/projected/d30b65d6-3363-496a-97c9-8983b5332464-kube-api-access-85kmk\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.336494 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.336551 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data-custom\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.336600 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-combined-ca-bundle\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.336774 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-internal-tls-certs\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.336828 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-public-tls-certs\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.336988 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-public-tls-certs\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.337608 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-internal-tls-certs\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.337692 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-combined-ca-bundle\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.337766 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.337889 4900 
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.337972 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data-custom\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.353173 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.354569 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-public-tls-certs\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.361969 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-internal-tls-certs\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.364704 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-combined-ca-bundle\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.367877 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data-custom\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.379648 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85kmk\" (UniqueName: \"kubernetes.io/projected/d30b65d6-3363-496a-97c9-8983b5332464-kube-api-access-85kmk\") pod \"heat-cfnapi-799b6c7dd6-cz9kf\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.380968 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Need to start a new one" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.446732 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-public-tls-certs\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.446867 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-combined-ca-bundle\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.446938 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dv7wb\" (UniqueName: \"kubernetes.io/projected/b44caf11-0144-4419-a3fd-49a686e81f0a-kube-api-access-dv7wb\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.446988 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data-custom\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.451301 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.451643 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-internal-tls-certs\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.463413 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.466917 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-public-tls-certs\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.466946 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-combined-ca-bundle\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: 
I0127 12:52:33.472305 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-internal-tls-certs\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.501417 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data-custom\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.511705 4900 generic.go:334] "Generic (PLEG): container finished" podID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerID="e37ceed8d91756b720f2c156156acea0cdf7a311ddadcd863a77eb716eed968f" exitCode=137 Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.512002 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5e07faff-922c-4bb3-a09e-e11135b3e369","Type":"ContainerDied","Data":"e37ceed8d91756b720f2c156156acea0cdf7a311ddadcd863a77eb716eed968f"} Jan 27 12:52:33 crc kubenswrapper[4900]: I0127 12:52:33.521178 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dv7wb\" (UniqueName: \"kubernetes.io/projected/b44caf11-0144-4419-a3fd-49a686e81f0a-kube-api-access-dv7wb\") pod \"heat-api-6f85ff8cdd-9lj8l\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:34 crc kubenswrapper[4900]: I0127 12:52:34.035240 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:35 crc kubenswrapper[4900]: I0127 12:52:35.906479 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:37 crc kubenswrapper[4900]: I0127 12:52:37.433554 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.208:5353: connect: connection refused" Jan 27 12:52:37 crc kubenswrapper[4900]: I0127 12:52:37.461638 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.209:8776/healthcheck\": dial tcp 10.217.0.209:8776: connect: connection refused" Jan 27 12:52:37 crc kubenswrapper[4900]: I0127 12:52:37.593650 4900 generic.go:334] "Generic (PLEG): container finished" podID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerID="f0b725221906d9a65689350818bed6a5afc6d225bf2f300009ad498ceae59e57" exitCode=0 Jan 27 12:52:37 crc kubenswrapper[4900]: I0127 12:52:37.593923 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bf6b97d5b-nvggn" event={"ID":"a5e02e88-5851-4d7a-93fe-39c857d7f2de","Type":"ContainerDied","Data":"f0b725221906d9a65689350818bed6a5afc6d225bf2f300009ad498ceae59e57"} Jan 27 12:52:39 crc kubenswrapper[4900]: I0127 12:52:39.134743 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-689d5949ff-jt4qp" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api" probeResult="failure" output="Get 
\"http://10.217.0.217:8004/healthcheck\": read tcp 10.217.0.2:43360->10.217.0.217:8004: read: connection reset by peer" Jan 27 12:52:39 crc kubenswrapper[4900]: I0127 12:52:39.136338 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-689d5949ff-jt4qp" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.217:8004/healthcheck\": dial tcp 10.217.0.217:8004: connect: connection refused" Jan 27 12:52:39 crc kubenswrapper[4900]: I0127 12:52:39.617701 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:52:39 crc kubenswrapper[4900]: I0127 12:52:39.623027 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" podUID="d305c1d2-157a-4980-8c1a-f02acd4bc99e" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.216:8000/healthcheck\": read tcp 10.217.0.2:51976->10.217.0.216:8000: read: connection reset by peer" Jan 27 12:52:39 crc kubenswrapper[4900]: I0127 12:52:39.921575 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" podUID="d305c1d2-157a-4980-8c1a-f02acd4bc99e" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.216:8000/healthcheck\": dial tcp 10.217.0.216:8000: connect: connection refused" Jan 27 12:52:40 crc kubenswrapper[4900]: I0127 12:52:40.115816 4900 generic.go:334] "Generic (PLEG): container finished" podID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerID="3c2bb4855a6b6a583ceb15847108132eb4c62d810a0e93bc04a4d3948bb5f181" exitCode=0 Jan 27 12:52:40 crc kubenswrapper[4900]: I0127 12:52:40.115845 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-689d5949ff-jt4qp" event={"ID":"a01e3768-681f-4cd6-8614-ecc16a8f5b0e","Type":"ContainerDied","Data":"3c2bb4855a6b6a583ceb15847108132eb4c62d810a0e93bc04a4d3948bb5f181"} Jan 27 12:52:40 crc kubenswrapper[4900]: I0127 12:52:40.117557 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nqj4r" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="registry-server" probeResult="failure" output=< Jan 27 12:52:40 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:52:40 crc kubenswrapper[4900]: > Jan 27 12:52:40 crc kubenswrapper[4900]: I0127 12:52:40.119238 4900 generic.go:334] "Generic (PLEG): container finished" podID="d305c1d2-157a-4980-8c1a-f02acd4bc99e" containerID="07bb0c84e7a749595bd0af9497fa65642ef87c5d12ed762128d431f30625e718" exitCode=0 Jan 27 12:52:40 crc kubenswrapper[4900]: I0127 12:52:40.119275 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" event={"ID":"d305c1d2-157a-4980-8c1a-f02acd4bc99e","Type":"ContainerDied","Data":"07bb0c84e7a749595bd0af9497fa65642ef87c5d12ed762128d431f30625e718"} Jan 27 12:52:40 crc kubenswrapper[4900]: I0127 12:52:40.120917 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-689d5949ff-jt4qp" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.217:8004/healthcheck\": dial tcp 10.217.0.217:8004: connect: connection refused" Jan 27 12:52:40 crc kubenswrapper[4900]: E0127 12:52:40.331205 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Jan 27 12:52:40 crc kubenswrapper[4900]: E0127 12:52:40.331467 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8fhc4h557hf9h97h59ch588h5b7h74h554h55dh9ch66h657hcdh588h95hd8h59bhfh559hchc7hb8h575hbh5fdhcch659h547h674h5dcq,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_CA_CERT,Value:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f6jtl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstackclient_openstack(ded5eaa8-8d7d-4ee2-bad6-62da18024e33): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:52:40 crc kubenswrapper[4900]: E0127 12:52:40.332596 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="ded5eaa8-8d7d-4ee2-bad6-62da18024e33" Jan 27 12:52:40 crc kubenswrapper[4900]: I0127 12:52:40.993183 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.106115 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-swift-storage-0\") pod \"67de2b7f-64d0-4a97-b742-18fc0abb827c\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.106492 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-config\") pod \"67de2b7f-64d0-4a97-b742-18fc0abb827c\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.106530 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z62zp\" (UniqueName: \"kubernetes.io/projected/67de2b7f-64d0-4a97-b742-18fc0abb827c-kube-api-access-z62zp\") pod \"67de2b7f-64d0-4a97-b742-18fc0abb827c\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.106610 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-nb\") pod \"67de2b7f-64d0-4a97-b742-18fc0abb827c\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.106763 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-svc\") pod \"67de2b7f-64d0-4a97-b742-18fc0abb827c\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.106861 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-sb\") pod \"67de2b7f-64d0-4a97-b742-18fc0abb827c\" (UID: \"67de2b7f-64d0-4a97-b742-18fc0abb827c\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.120106 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67de2b7f-64d0-4a97-b742-18fc0abb827c-kube-api-access-z62zp" (OuterVolumeSpecName: "kube-api-access-z62zp") pod "67de2b7f-64d0-4a97-b742-18fc0abb827c" (UID: "67de2b7f-64d0-4a97-b742-18fc0abb827c"). InnerVolumeSpecName "kube-api-access-z62zp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.142313 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" event={"ID":"67de2b7f-64d0-4a97-b742-18fc0abb827c","Type":"ContainerDied","Data":"58eeec6cfa481b3a6b202e56b634ca5fef9161911760cfcb4c236be9a9054ac3"} Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.142384 4900 scope.go:117] "RemoveContainer" containerID="d38f55dfc8e94aa9f8e511c1d2ea8c1cff244e9f2614345e5eeb1ebad0610637" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.142574 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-89nlm" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.148771 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7cb9669c67-gp7qj" event={"ID":"668f25a4-c69e-4696-849d-166f82f28d00","Type":"ContainerStarted","Data":"1512aafce2a95e2d5dc06fb85f2496fb5ad97f8d8e17d67ef1e2cb70d78f6ee8"} Jan 27 12:52:41 crc kubenswrapper[4900]: E0127 12:52:41.155193 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="ded5eaa8-8d7d-4ee2-bad6-62da18024e33" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.284852 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z62zp\" (UniqueName: \"kubernetes.io/projected/67de2b7f-64d0-4a97-b742-18fc0abb827c-kube-api-access-z62zp\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.287360 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "67de2b7f-64d0-4a97-b742-18fc0abb827c" (UID: "67de2b7f-64d0-4a97-b742-18fc0abb827c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.348526 4900 scope.go:117] "RemoveContainer" containerID="bf4aea68e3588cbac8bebf035b3571e756c2e37efb375bffdcf2ff3edf93d4ac" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.374784 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "67de2b7f-64d0-4a97-b742-18fc0abb827c" (UID: "67de2b7f-64d0-4a97-b742-18fc0abb827c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.379492 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-config" (OuterVolumeSpecName: "config") pod "67de2b7f-64d0-4a97-b742-18fc0abb827c" (UID: "67de2b7f-64d0-4a97-b742-18fc0abb827c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.391455 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.391922 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.391932 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.398719 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "67de2b7f-64d0-4a97-b742-18fc0abb827c" (UID: "67de2b7f-64d0-4a97-b742-18fc0abb827c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.413972 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "67de2b7f-64d0-4a97-b742-18fc0abb827c" (UID: "67de2b7f-64d0-4a97-b742-18fc0abb827c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.539894 4900 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.540166 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67de2b7f-64d0-4a97-b742-18fc0abb827c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.702546 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-89nlm"] Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.779309 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-89nlm"] Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.869779 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.896176 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.932547 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.964596 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995278 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-combined-ca-bundle\") pod \"33045450-fef6-4a24-a926-c91d19c2fe02\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995406 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t85pv\" (UniqueName: \"kubernetes.io/projected/5e07faff-922c-4bb3-a09e-e11135b3e369-kube-api-access-t85pv\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995528 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data-custom\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995583 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995665 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-scripts\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995705 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5e07faff-922c-4bb3-a09e-e11135b3e369-etc-machine-id\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995747 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-sg-core-conf-yaml\") pod \"33045450-fef6-4a24-a926-c91d19c2fe02\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995835 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data-custom\") pod \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.995874 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data\") pod \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996123 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e07faff-922c-4bb3-a09e-e11135b3e369-logs\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: 
\"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996200 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data-custom\") pod \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996281 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6kd4\" (UniqueName: \"kubernetes.io/projected/d305c1d2-157a-4980-8c1a-f02acd4bc99e-kube-api-access-l6kd4\") pod \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\" (UID: \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996459 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data\") pod \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996500 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcwpp\" (UniqueName: \"kubernetes.io/projected/33045450-fef6-4a24-a926-c91d19c2fe02-kube-api-access-tcwpp\") pod \"33045450-fef6-4a24-a926-c91d19c2fe02\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996569 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-run-httpd\") pod \"33045450-fef6-4a24-a926-c91d19c2fe02\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996620 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-combined-ca-bundle\") pod \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996700 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-combined-ca-bundle\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996729 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-config-data\") pod \"33045450-fef6-4a24-a926-c91d19c2fe02\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996755 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-log-httpd\") pod \"33045450-fef6-4a24-a926-c91d19c2fe02\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996816 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-combined-ca-bundle\") pod \"d305c1d2-157a-4980-8c1a-f02acd4bc99e\" (UID: 
\"d305c1d2-157a-4980-8c1a-f02acd4bc99e\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996876 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-scripts\") pod \"33045450-fef6-4a24-a926-c91d19c2fe02\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " Jan 27 12:52:41 crc kubenswrapper[4900]: I0127 12:52:41.996927 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfsm7\" (UniqueName: \"kubernetes.io/projected/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-kube-api-access-nfsm7\") pod \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\" (UID: \"a01e3768-681f-4cd6-8614-ecc16a8f5b0e\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.003744 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5e07faff-922c-4bb3-a09e-e11135b3e369-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.004238 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.011170 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-kube-api-access-nfsm7" (OuterVolumeSpecName: "kube-api-access-nfsm7") pod "a01e3768-681f-4cd6-8614-ecc16a8f5b0e" (UID: "a01e3768-681f-4cd6-8614-ecc16a8f5b0e"). InnerVolumeSpecName "kube-api-access-nfsm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.017338 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e07faff-922c-4bb3-a09e-e11135b3e369-logs" (OuterVolumeSpecName: "logs") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.019957 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "33045450-fef6-4a24-a926-c91d19c2fe02" (UID: "33045450-fef6-4a24-a926-c91d19c2fe02"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.020712 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "33045450-fef6-4a24-a926-c91d19c2fe02" (UID: "33045450-fef6-4a24-a926-c91d19c2fe02"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.031995 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-scripts" (OuterVolumeSpecName: "scripts") pod "33045450-fef6-4a24-a926-c91d19c2fe02" (UID: "33045450-fef6-4a24-a926-c91d19c2fe02"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.060556 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a01e3768-681f-4cd6-8614-ecc16a8f5b0e" (UID: "a01e3768-681f-4cd6-8614-ecc16a8f5b0e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.082746 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d305c1d2-157a-4980-8c1a-f02acd4bc99e" (UID: "d305c1d2-157a-4980-8c1a-f02acd4bc99e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.084758 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d305c1d2-157a-4980-8c1a-f02acd4bc99e-kube-api-access-l6kd4" (OuterVolumeSpecName: "kube-api-access-l6kd4") pod "d305c1d2-157a-4980-8c1a-f02acd4bc99e" (UID: "d305c1d2-157a-4980-8c1a-f02acd4bc99e"). InnerVolumeSpecName "kube-api-access-l6kd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.097586 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e07faff-922c-4bb3-a09e-e11135b3e369-kube-api-access-t85pv" (OuterVolumeSpecName: "kube-api-access-t85pv") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "kube-api-access-t85pv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.098143 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.105604 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-scripts" (OuterVolumeSpecName: "scripts") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.109222 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33045450-fef6-4a24-a926-c91d19c2fe02-kube-api-access-tcwpp" (OuterVolumeSpecName: "kube-api-access-tcwpp") pod "33045450-fef6-4a24-a926-c91d19c2fe02" (UID: "33045450-fef6-4a24-a926-c91d19c2fe02"). InnerVolumeSpecName "kube-api-access-tcwpp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.109680 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-config\") pod \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.109893 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-ovndb-tls-certs\") pod \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.109990 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-combined-ca-bundle\") pod \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.110348 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcwpp\" (UniqueName: \"kubernetes.io/projected/33045450-fef6-4a24-a926-c91d19c2fe02-kube-api-access-tcwpp\") pod \"33045450-fef6-4a24-a926-c91d19c2fe02\" (UID: \"33045450-fef6-4a24-a926-c91d19c2fe02\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.110579 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-httpd-config\") pod \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.110675 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wd4nt\" (UniqueName: \"kubernetes.io/projected/a5e02e88-5851-4d7a-93fe-39c857d7f2de-kube-api-access-wd4nt\") pod \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\" (UID: \"a5e02e88-5851-4d7a-93fe-39c857d7f2de\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.110915 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data-custom\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112421 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfsm7\" (UniqueName: \"kubernetes.io/projected/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-kube-api-access-nfsm7\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112453 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t85pv\" (UniqueName: \"kubernetes.io/projected/5e07faff-922c-4bb3-a09e-e11135b3e369-kube-api-access-t85pv\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112469 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112485 4900 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/5e07faff-922c-4bb3-a09e-e11135b3e369-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112499 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112512 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e07faff-922c-4bb3-a09e-e11135b3e369-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112523 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112535 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6kd4\" (UniqueName: \"kubernetes.io/projected/d305c1d2-157a-4980-8c1a-f02acd4bc99e-kube-api-access-l6kd4\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112549 4900 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112559 4900 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/33045450-fef6-4a24-a926-c91d19c2fe02-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112570 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: W0127 12:52:42.112707 4900 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/5e07faff-922c-4bb3-a09e-e11135b3e369/volumes/kubernetes.io~secret/config-data-custom Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112727 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: W0127 12:52:42.112813 4900 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/33045450-fef6-4a24-a926-c91d19c2fe02/volumes/kubernetes.io~projected/kube-api-access-tcwpp Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.112979 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33045450-fef6-4a24-a926-c91d19c2fe02-kube-api-access-tcwpp" (OuterVolumeSpecName: "kube-api-access-tcwpp") pod "33045450-fef6-4a24-a926-c91d19c2fe02" (UID: "33045450-fef6-4a24-a926-c91d19c2fe02"). InnerVolumeSpecName "kube-api-access-tcwpp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.126465 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "a5e02e88-5851-4d7a-93fe-39c857d7f2de" (UID: "a5e02e88-5851-4d7a-93fe-39c857d7f2de"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.127980 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5e02e88-5851-4d7a-93fe-39c857d7f2de-kube-api-access-wd4nt" (OuterVolumeSpecName: "kube-api-access-wd4nt") pod "a5e02e88-5851-4d7a-93fe-39c857d7f2de" (UID: "a5e02e88-5851-4d7a-93fe-39c857d7f2de"). InnerVolumeSpecName "kube-api-access-wd4nt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.170281 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5e07faff-922c-4bb3-a09e-e11135b3e369","Type":"ContainerDied","Data":"4b2ea7c74f814a052eb2353cda47350da1308fd9b0dc66ee0f4e4afe50d89fe1"} Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.170346 4900 scope.go:117] "RemoveContainer" containerID="e37ceed8d91756b720f2c156156acea0cdf7a311ddadcd863a77eb716eed968f" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.170508 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.180240 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"33045450-fef6-4a24-a926-c91d19c2fe02","Type":"ContainerDied","Data":"2a5332746527779a21ac8f28d29d255b73addf451a262048cd4ab802425334eb"} Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.180365 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.226896 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bf6b97d5b-nvggn" event={"ID":"a5e02e88-5851-4d7a-93fe-39c857d7f2de","Type":"ContainerDied","Data":"14dcee93e5085057492fe5ac53fe19aa680d01e0b89cb447830ae9f60b8b8743"} Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.227263 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5bf6b97d5b-nvggn" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.244462 4900 scope.go:117] "RemoveContainer" containerID="5faba2fc732b648231d6569a8f588e69834f668a745435fe267df4576c220e15" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.247701 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-689d5949ff-jt4qp" event={"ID":"a01e3768-681f-4cd6-8614-ecc16a8f5b0e","Type":"ContainerDied","Data":"85d6c36ff26f350bc361ade16022732a53716a487845c617c6b36a77ad9a1369"} Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.247811 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-689d5949ff-jt4qp" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.253118 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.271304 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-combined-ca-bundle\") pod \"5e07faff-922c-4bb3-a09e-e11135b3e369\" (UID: \"5e07faff-922c-4bb3-a09e-e11135b3e369\") " Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.272166 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.272186 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcwpp\" (UniqueName: \"kubernetes.io/projected/33045450-fef6-4a24-a926-c91d19c2fe02-kube-api-access-tcwpp\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.272199 4900 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.272208 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wd4nt\" (UniqueName: \"kubernetes.io/projected/a5e02e88-5851-4d7a-93fe-39c857d7f2de-kube-api-access-wd4nt\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: W0127 12:52:42.272290 4900 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/5e07faff-922c-4bb3-a09e-e11135b3e369/volumes/kubernetes.io~secret/combined-ca-bundle Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.272302 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.280426 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7cb9669c67-gp7qj" event={"ID":"668f25a4-c69e-4696-849d-166f82f28d00","Type":"ContainerStarted","Data":"4f513b1c2d3df2e46321fd5eb1d5b168c00bd37eeb846474097fa669f72d3e76"} Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.281254 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.281324 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7cb9669c67-gp7qj" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.290507 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d305c1d2-157a-4980-8c1a-f02acd4bc99e" (UID: "d305c1d2-157a-4980-8c1a-f02acd4bc99e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.293123 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" event={"ID":"d305c1d2-157a-4980-8c1a-f02acd4bc99e","Type":"ContainerDied","Data":"7f5cf233d589dd93ec093d5a1afa4917be7047c7999418ececcf6664574d051e"} Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.293238 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-758b8d686f-mcf4h" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.386187 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7cb9669c67-gp7qj" podStartSLOduration=16.386145918 podStartE2EDuration="16.386145918s" podCreationTimestamp="2026-01-27 12:52:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:42.319961288 +0000 UTC m=+1589.556989508" watchObservedRunningTime="2026-01-27 12:52:42.386145918 +0000 UTC m=+1589.623174128" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.388454 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.388588 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.397275 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-config" (OuterVolumeSpecName: "config") pod "a5e02e88-5851-4d7a-93fe-39c857d7f2de" (UID: "a5e02e88-5851-4d7a-93fe-39c857d7f2de"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.398393 4900 scope.go:117] "RemoveContainer" containerID="58bf39b05777fadec75f938969df1684c256309712db6ae44ef0d01e2a406b7e"
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.406255 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data" (OuterVolumeSpecName: "config-data") pod "5e07faff-922c-4bb3-a09e-e11135b3e369" (UID: "5e07faff-922c-4bb3-a09e-e11135b3e369"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.428229 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a01e3768-681f-4cd6-8614-ecc16a8f5b0e" (UID: "a01e3768-681f-4cd6-8614-ecc16a8f5b0e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.434306 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "33045450-fef6-4a24-a926-c91d19c2fe02" (UID: "33045450-fef6-4a24-a926-c91d19c2fe02"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.449924 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data" (OuterVolumeSpecName: "config-data") pod "d305c1d2-157a-4980-8c1a-f02acd4bc99e" (UID: "d305c1d2-157a-4980-8c1a-f02acd4bc99e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.471347 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "a5e02e88-5851-4d7a-93fe-39c857d7f2de" (UID: "a5e02e88-5851-4d7a-93fe-39c857d7f2de"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.472351 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5e02e88-5851-4d7a-93fe-39c857d7f2de" (UID: "a5e02e88-5851-4d7a-93fe-39c857d7f2de"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.492442 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e07faff-922c-4bb3-a09e-e11135b3e369-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.519426 4900 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.519571 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.519670 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d305c1d2-157a-4980-8c1a-f02acd4bc99e-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.519759 4900 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.519854 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5e02e88-5851-4d7a-93fe-39c857d7f2de-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.519943 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.571730 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33045450-fef6-4a24-a926-c91d19c2fe02" (UID: "33045450-fef6-4a24-a926-c91d19c2fe02"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.584653 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" path="/var/lib/kubelet/pods/67de2b7f-64d0-4a97-b742-18fc0abb827c/volumes"
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.623487 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.626935 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data" (OuterVolumeSpecName: "config-data") pod "a01e3768-681f-4cd6-8614-ecc16a8f5b0e" (UID: "a01e3768-681f-4cd6-8614-ecc16a8f5b0e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.677474 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-config-data" (OuterVolumeSpecName: "config-data") pod "33045450-fef6-4a24-a926-c91d19c2fe02" (UID: "33045450-fef6-4a24-a926-c91d19c2fe02"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.727895 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a01e3768-681f-4cd6-8614-ecc16a8f5b0e-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.728263 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33045450-fef6-4a24-a926-c91d19c2fe02-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.880784 4900 scope.go:117] "RemoveContainer" containerID="e8efa6c66a1b88295c17777f8906520c4063eb3e6714590fae2fe2c1e524121c"
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.951812 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7c966d6899-4tntc"]
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.951864 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-55dc67899d-kc667"]
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.951886 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sq822"]
Jan 27 12:52:42 crc kubenswrapper[4900]: I0127 12:52:42.951994 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6f85ff8cdd-9lj8l"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.007484 4900 scope.go:117] "RemoveContainer" containerID="255c80b80399b7f9ab2b447d472f0a9004a1e964b6a080dff6a20e18fa89fdf0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.034820 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-799b6c7dd6-cz9kf"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.064561 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5b76ffc8f4-6qc4h"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.207538 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.224532 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.267320 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.267905 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="ceilometer-central-agent"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.267925 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="ceilometer-central-agent"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.267938 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="ceilometer-notification-agent"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.267945 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="ceilometer-notification-agent"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.267961 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="proxy-httpd"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.267967 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="proxy-httpd"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.267976 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerName="init"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.267982 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerName="init"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.267997 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api-log"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268003 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api-log"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.268014 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerName="dnsmasq-dns"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268019 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerName="dnsmasq-dns"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.268033 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerName="neutron-httpd"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268042 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerName="neutron-httpd"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.268105 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerName="neutron-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268111 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerName="neutron-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.268120 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="sg-core"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268125 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="sg-core"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.268137 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d305c1d2-157a-4980-8c1a-f02acd4bc99e" containerName="heat-cfnapi"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268144 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d305c1d2-157a-4980-8c1a-f02acd4bc99e" containerName="heat-cfnapi"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.268156 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268161 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.268174 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268181 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268395 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerName="neutron-httpd"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268410 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" containerName="neutron-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268420 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="sg-core"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268433 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d305c1d2-157a-4980-8c1a-f02acd4bc99e" containerName="heat-cfnapi"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268440 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="proxy-httpd"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268454 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" containerName="heat-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268465 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268472 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="ceilometer-notification-agent"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268480 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="67de2b7f-64d0-4a97-b742-18fc0abb827c" containerName="dnsmasq-dns"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268486 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" containerName="ceilometer-central-agent"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.268498 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" containerName="cinder-api-log"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.269635 4900 scope.go:117] "RemoveContainer" containerID="d18819e0069d49a28d9cec8272e82136f3b390380a58369a4f08d2d98da08f9a"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.271428 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.275213 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.275353 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.335106 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-689d5949ff-jt4qp"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.399798 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-scripts\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.400288 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrss8\" (UniqueName: \"kubernetes.io/projected/3521fa03-6a1d-4bd6-90ed-2802291f5949-kube-api-access-rrss8\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.400939 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.401589 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.401718 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-run-httpd\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.402010 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-log-httpd\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.402122 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-config-data\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.427191 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-689d5949ff-jt4qp"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.470144 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.483647 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7cb9669c67-gp7qj" event={"ID":"668f25a4-c69e-4696-849d-166f82f28d00","Type":"ContainerStarted","Data":"063e069ee3b7748d205373213c0719db460f888ffef4bf959e2030a356eb8fc9"}
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.488836 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c966d6899-4tntc" event={"ID":"39372723-3fe3-4691-b1ed-16d971262f96","Type":"ContainerStarted","Data":"2b597b41daf97691aa4b8f07b6e916b90314c06e8cf47454db039df0e6458003"}
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.496979 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5b76ffc8f4-6qc4h" event={"ID":"33681235-9f03-4254-9658-5948d6a9db37","Type":"ContainerStarted","Data":"c4fb5c343603c064c6bbb05d35017d945ad9d3db807b7a038365d0ace97aaf56"}
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.518171 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6f85ff8cdd-9lj8l" event={"ID":"b44caf11-0144-4419-a3fd-49a686e81f0a","Type":"ContainerStarted","Data":"3de59d9bb9e1c13d73bc69f0ef5081f6b57b1d8c564441ef6777841d8b4da668"}
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.526417 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5bf6b97d5b-nvggn"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.529040 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.529198 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.529251 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-run-httpd\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.529281 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-log-httpd\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.529318 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-config-data\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.529395 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-scripts\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.529466 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrss8\" (UniqueName: \"kubernetes.io/projected/3521fa03-6a1d-4bd6-90ed-2802291f5949-kube-api-access-rrss8\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.531339 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-55dc67899d-kc667" event={"ID":"eed3fb3f-18d6-4382-85f1-77235d870e91","Type":"ContainerStarted","Data":"4c2f59662c3f382fab739515386eb317d39939dfc44cb92a96b732159b02746e"}
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.531666 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-run-httpd\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.532348 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-log-httpd\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.541544 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sq822" event={"ID":"157a4f73-69ff-48a5-a97b-e78693201660","Type":"ContainerStarted","Data":"aed5315d3fa888c3603a215e1c7b789a1bf6543b75f3645a8a6a113e6997591b"}
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.552330 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.552795 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-scripts\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.554902 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" event={"ID":"d30b65d6-3363-496a-97c9-8983b5332464","Type":"ContainerStarted","Data":"caf1c1201a0bcd4fcacaf2e12b8b46ed183c90ae41e06da2bd6b2282f44aa050"}
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.554927 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-config-data\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.560683 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrss8\" (UniqueName: \"kubernetes.io/projected/3521fa03-6a1d-4bd6-90ed-2802291f5949-kube-api-access-rrss8\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.563092 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " pod="openstack/ceilometer-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.590186 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5bf6b97d5b-nvggn"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.613949 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 27 12:52:43 crc kubenswrapper[4900]: E0127 12:52:43.616450 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33045450_fef6_4a24_a926_c91d19c2fe02.slice/crio-2a5332746527779a21ac8f28d29d255b73addf451a262048cd4ab802425334eb\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5e02e88_5851_4d7a_93fe_39c857d7f2de.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda01e3768_681f_4cd6_8614_ecc16a8f5b0e.slice/crio-85d6c36ff26f350bc361ade16022732a53716a487845c617c6b36a77ad9a1369\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33045450_fef6_4a24_a926_c91d19c2fe02.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda01e3768_681f_4cd6_8614_ecc16a8f5b0e.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e07faff_922c_4bb3_a09e_e11135b3e369.slice/crio-4b2ea7c74f814a052eb2353cda47350da1308fd9b0dc66ee0f4e4afe50d89fe1\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd305c1d2_157a_4980_8c1a_f02acd4bc99e.slice/crio-7f5cf233d589dd93ec093d5a1afa4917be7047c7999418ececcf6664574d051e\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5e02e88_5851_4d7a_93fe_39c857d7f2de.slice/crio-14dcee93e5085057492fe5ac53fe19aa680d01e0b89cb447830ae9f60b8b8743\": RecentStats: unable to find data in memory cache]"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.630721 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.649888 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.653517 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.656492 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.656749 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.658038 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.681978 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-758b8d686f-mcf4h"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.703143 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-758b8d686f-mcf4h"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.721559 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.735951 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-scripts\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.736086 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.736305 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wms25\" (UniqueName: \"kubernetes.io/projected/49ffda39-1561-49d2-a67d-ad7da16103b2-kube-api-access-wms25\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.736470 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.736527 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-config-data\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.736708 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49ffda39-1561-49d2-a67d-ad7da16103b2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.736751 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-config-data-custom\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.736965 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-public-tls-certs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.737099 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49ffda39-1561-49d2-a67d-ad7da16103b2-logs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.841573 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.841678 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-scripts\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.841730 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wms25\" (UniqueName: \"kubernetes.io/projected/49ffda39-1561-49d2-a67d-ad7da16103b2-kube-api-access-wms25\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.841831 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.841868 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-config-data\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.842047 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49ffda39-1561-49d2-a67d-ad7da16103b2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.842116 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-config-data-custom\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.842352 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-public-tls-certs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.842420 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49ffda39-1561-49d2-a67d-ad7da16103b2-logs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.843252 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49ffda39-1561-49d2-a67d-ad7da16103b2-logs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.846868 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/49ffda39-1561-49d2-a67d-ad7da16103b2-etc-machine-id\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.858173 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-config-data\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.868631 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-config-data-custom\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.868694 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-scripts\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.870810 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.871004 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-public-tls-certs\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.871462 4900 scope.go:117] "RemoveContainer" containerID="c57a1f196de92f004e5836fa5b3e9a73328a8ac375d79a67ea6ae82c79b0b3e7"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.873212 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49ffda39-1561-49d2-a67d-ad7da16103b2-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:43 crc kubenswrapper[4900]: I0127 12:52:43.873585 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wms25\" (UniqueName: \"kubernetes.io/projected/49ffda39-1561-49d2-a67d-ad7da16103b2-kube-api-access-wms25\") pod \"cinder-api-0\" (UID: \"49ffda39-1561-49d2-a67d-ad7da16103b2\") " pod="openstack/cinder-api-0"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.140199 4900 scope.go:117] "RemoveContainer" containerID="f0b725221906d9a65689350818bed6a5afc6d225bf2f300009ad498ceae59e57"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.152963 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.153310 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.174515 4900 scope.go:117] "RemoveContainer" containerID="3c2bb4855a6b6a583ceb15847108132eb4c62d810a0e93bc04a4d3948bb5f181"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.352336 4900 scope.go:117] "RemoveContainer" containerID="07bb0c84e7a749595bd0af9497fa65642ef87c5d12ed762128d431f30625e718"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.560300 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33045450-fef6-4a24-a926-c91d19c2fe02" path="/var/lib/kubelet/pods/33045450-fef6-4a24-a926-c91d19c2fe02/volumes"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.561613 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e07faff-922c-4bb3-a09e-e11135b3e369" path="/var/lib/kubelet/pods/5e07faff-922c-4bb3-a09e-e11135b3e369/volumes"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.562745 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a01e3768-681f-4cd6-8614-ecc16a8f5b0e" path="/var/lib/kubelet/pods/a01e3768-681f-4cd6-8614-ecc16a8f5b0e/volumes"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.563983 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5e02e88-5851-4d7a-93fe-39c857d7f2de" path="/var/lib/kubelet/pods/a5e02e88-5851-4d7a-93fe-39c857d7f2de/volumes"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.564807 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d305c1d2-157a-4980-8c1a-f02acd4bc99e" path="/var/lib/kubelet/pods/d305c1d2-157a-4980-8c1a-f02acd4bc99e/volumes"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.619643 4900 generic.go:334] "Generic (PLEG): container finished" podID="157a4f73-69ff-48a5-a97b-e78693201660" containerID="0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32" exitCode=0
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.621010 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sq822" event={"ID":"157a4f73-69ff-48a5-a97b-e78693201660","Type":"ContainerDied","Data":"0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32"}
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.634757 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" event={"ID":"d30b65d6-3363-496a-97c9-8983b5332464","Type":"ContainerStarted","Data":"598bc50f504dcb77d53ea08302261e3f4e53ab9a648ac6488d9735d738e289ac"}
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.636365 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.653743 4900 generic.go:334] "Generic (PLEG): container finished" podID="39372723-3fe3-4691-b1ed-16d971262f96" containerID="f593b90da2aafbbef5f391c67c4ad275bbb7ac95fa12c224d5137e9815e84c65" exitCode=1
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.653834 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c966d6899-4tntc" event={"ID":"39372723-3fe3-4691-b1ed-16d971262f96","Type":"ContainerDied","Data":"f593b90da2aafbbef5f391c67c4ad275bbb7ac95fa12c224d5137e9815e84c65"}
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.655191 4900 scope.go:117] "RemoveContainer" containerID="f593b90da2aafbbef5f391c67c4ad275bbb7ac95fa12c224d5137e9815e84c65"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.655642 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7c966d6899-4tntc"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.655698 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-7c966d6899-4tntc"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.700718 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6f85ff8cdd-9lj8l" event={"ID":"b44caf11-0144-4419-a3fd-49a686e81f0a","Type":"ContainerStarted","Data":"48f6f2cbc0d29460a39333c7fa6f45fcf8da6d16bbb2fd0bb2d583c0dc7af4b0"}
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.702953 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-6f85ff8cdd-9lj8l"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.707852 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" podStartSLOduration=12.707825078 podStartE2EDuration="12.707825078s" podCreationTimestamp="2026-01-27 12:52:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:44.671012654 +0000 UTC m=+1591.908040864" watchObservedRunningTime="2026-01-27 12:52:44.707825078 +0000 UTC m=+1591.944853288"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.738220 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5b76ffc8f4-6qc4h"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.748847 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-6f85ff8cdd-9lj8l" podStartSLOduration=11.748823613 podStartE2EDuration="11.748823613s" podCreationTimestamp="2026-01-27 12:52:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:44.734773323 +0000 UTC m=+1591.971801533" watchObservedRunningTime="2026-01-27 12:52:44.748823613 +0000 UTC m=+1591.985851823"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.759977 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-55dc67899d-kc667" event={"ID":"eed3fb3f-18d6-4382-85f1-77235d870e91","Type":"ContainerStarted","Data":"f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef"}
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.761194 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-55dc67899d-kc667"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.783622 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5b76ffc8f4-6qc4h" podStartSLOduration=15.783592366 podStartE2EDuration="15.783592366s" podCreationTimestamp="2026-01-27 12:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:44.76519753 +0000 UTC m=+1592.002225740" watchObservedRunningTime="2026-01-27 12:52:44.783592366 +0000 UTC m=+1592.020620576"
Jan 27 12:52:44 crc kubenswrapper[4900]: I0127 12:52:44.806452 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-55dc67899d-kc667" podStartSLOduration=15.806426752 podStartE2EDuration="15.806426752s" podCreationTimestamp="2026-01-27 12:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:44.794648409 +0000 UTC m=+1592.031676619" watchObservedRunningTime="2026-01-27 12:52:44.806426752 +0000 UTC m=+1592.043454982"
Jan 27 12:52:45 crc kubenswrapper[4900]: I0127 12:52:45.075720 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 27 12:52:45 crc kubenswrapper[4900]: W0127 12:52:45.119376 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49ffda39_1561_49d2_a67d_ad7da16103b2.slice/crio-22bbc36dd1afbd19c480f243e5733473c15e2382c5a3534087266ae20dbd3e1f WatchSource:0}: Error finding container 22bbc36dd1afbd19c480f243e5733473c15e2382c5a3534087266ae20dbd3e1f: Status 404 returned error can't find the container with id 22bbc36dd1afbd19c480f243e5733473c15e2382c5a3534087266ae20dbd3e1f
Jan 27 12:52:45 crc kubenswrapper[4900]: I0127 12:52:45.127475 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:52:45 crc kubenswrapper[4900]: I0127 12:52:45.804159 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerStarted","Data":"c213da95e4177165306861d8da8540854ee1794b06e36253ba67779344d16b61"}
Jan 27 12:52:45 crc kubenswrapper[4900]: I0127 12:52:45.812078 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"49ffda39-1561-49d2-a67d-ad7da16103b2","Type":"ContainerStarted","Data":"22bbc36dd1afbd19c480f243e5733473c15e2382c5a3534087266ae20dbd3e1f"}
Jan 27 12:52:45 crc kubenswrapper[4900]: I0127 12:52:45.877647 4900 generic.go:334] "Generic (PLEG): container finished" podID="33681235-9f03-4254-9658-5948d6a9db37" containerID="8dd6cef822e3e1ea49a83e510a2adc38b8773cba3075154401987e75a3630b07" exitCode=1
Jan 27 12:52:45 crc kubenswrapper[4900]: I0127 12:52:45.878699 4900 scope.go:117] "RemoveContainer" containerID="8dd6cef822e3e1ea49a83e510a2adc38b8773cba3075154401987e75a3630b07"
Jan 27 12:52:45 crc kubenswrapper[4900]: I0127 12:52:45.879888 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5b76ffc8f4-6qc4h" event={"ID":"33681235-9f03-4254-9658-5948d6a9db37","Type":"ContainerDied","Data":"8dd6cef822e3e1ea49a83e510a2adc38b8773cba3075154401987e75a3630b07"}
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.906407 4900 generic.go:334] "Generic (PLEG): container finished" podID="39372723-3fe3-4691-b1ed-16d971262f96" containerID="706172a72774f2fd5baa9552e1ce4a1b2672708b13b01f91fa0dc25b76c1de4f" exitCode=1
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.906568 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c966d6899-4tntc" event={"ID":"39372723-3fe3-4691-b1ed-16d971262f96","Type":"ContainerDied","Data":"706172a72774f2fd5baa9552e1ce4a1b2672708b13b01f91fa0dc25b76c1de4f"}
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.906930 4900 scope.go:117] "RemoveContainer" containerID="f593b90da2aafbbef5f391c67c4ad275bbb7ac95fa12c224d5137e9815e84c65"
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.907397 4900 scope.go:117] "RemoveContainer" containerID="706172a72774f2fd5baa9552e1ce4a1b2672708b13b01f91fa0dc25b76c1de4f"
Jan 27 12:52:46 crc kubenswrapper[4900]: E0127 12:52:46.907721 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7c966d6899-4tntc_openstack(39372723-3fe3-4691-b1ed-16d971262f96)\"" pod="openstack/heat-cfnapi-7c966d6899-4tntc" podUID="39372723-3fe3-4691-b1ed-16d971262f96"
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.916864 4900 generic.go:334] "Generic (PLEG): container finished" podID="33681235-9f03-4254-9658-5948d6a9db37" containerID="3dfff72645a7c79fb7b2dad52bfe3a82fb405d57e788e3228585b4a1ec3b718e" exitCode=1
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.916975 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5b76ffc8f4-6qc4h" event={"ID":"33681235-9f03-4254-9658-5948d6a9db37","Type":"ContainerDied","Data":"3dfff72645a7c79fb7b2dad52bfe3a82fb405d57e788e3228585b4a1ec3b718e"}
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.917546 4900 scope.go:117] "RemoveContainer" containerID="3dfff72645a7c79fb7b2dad52bfe3a82fb405d57e788e3228585b4a1ec3b718e"
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.923868 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerStarted","Data":"2fca4dd6f4fa1e6ff8ae3544b3db5ac191bb6b626d0f77460386de3b51998409"}
Jan 27 12:52:46 crc kubenswrapper[4900]: E0127 12:52:46.926928 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5b76ffc8f4-6qc4h_openstack(33681235-9f03-4254-9658-5948d6a9db37)\"" pod="openstack/heat-api-5b76ffc8f4-6qc4h" podUID="33681235-9f03-4254-9658-5948d6a9db37"
Jan 27 12:52:46 crc kubenswrapper[4900]: I0127 12:52:46.956683 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"49ffda39-1561-49d2-a67d-ad7da16103b2","Type":"ContainerStarted","Data":"426132a3a069f07f99e7cb244eb0f9c8d4ca58af2faa9b2560d8aae8cb91288b"}
Jan 27 12:52:47 crc kubenswrapper[4900]: I0127 12:52:47.049244 4900 scope.go:117] "RemoveContainer" containerID="8dd6cef822e3e1ea49a83e510a2adc38b8773cba3075154401987e75a3630b07"
Jan 27 12:52:47 crc kubenswrapper[4900]: I0127 12:52:47.734033 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7cb9669c67-gp7qj"
Jan 27 12:52:47 crc kubenswrapper[4900]: I0127 12:52:47.763935 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7cb9669c67-gp7qj"
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.041799 4900 scope.go:117] "RemoveContainer" containerID="3dfff72645a7c79fb7b2dad52bfe3a82fb405d57e788e3228585b4a1ec3b718e"
Jan 27 12:52:48 crc kubenswrapper[4900]: E0127 12:52:48.042990 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5b76ffc8f4-6qc4h_openstack(33681235-9f03-4254-9658-5948d6a9db37)\"" pod="openstack/heat-api-5b76ffc8f4-6qc4h" podUID="33681235-9f03-4254-9658-5948d6a9db37"
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.061759 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerStarted","Data":"c1678ebc3979a5407fafa350da2da2447438e4d5bba994578d21f3ae7d22dc06"}
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.081509 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"49ffda39-1561-49d2-a67d-ad7da16103b2","Type":"ContainerStarted","Data":"a1b77ca044fd5f927b2a89d20c5640a3a1eef7cb7822251139c92c87f75c7248"}
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.082418 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.129277 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sq822" event={"ID":"157a4f73-69ff-48a5-a97b-e78693201660","Type":"ContainerStarted","Data":"b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5"}
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.141357 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.141324279 podStartE2EDuration="5.141324279s" podCreationTimestamp="2026-01-27 12:52:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:48.11770136 +0000 UTC m=+1595.354729590" watchObservedRunningTime="2026-01-27 12:52:48.141324279 +0000 UTC m=+1595.378352489"
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.165537 4900 scope.go:117] "RemoveContainer" containerID="706172a72774f2fd5baa9552e1ce4a1b2672708b13b01f91fa0dc25b76c1de4f"
Jan 27 12:52:48 crc kubenswrapper[4900]: E0127 12:52:48.166188 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7c966d6899-4tntc_openstack(39372723-3fe3-4691-b1ed-16d971262f96)\"" pod="openstack/heat-cfnapi-7c966d6899-4tntc" podUID="39372723-3fe3-4691-b1ed-16d971262f96"
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.543389 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nqj4r"
Jan 27 12:52:48 crc kubenswrapper[4900]: I0127 12:52:48.608811 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nqj4r"
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.196091 4900 generic.go:334] "Generic (PLEG): container finished" podID="157a4f73-69ff-48a5-a97b-e78693201660" containerID="b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5" exitCode=0
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.196638 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sq822" event={"ID":"157a4f73-69ff-48a5-a97b-e78693201660","Type":"ContainerDied","Data":"b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5"}
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.291419 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nqj4r"]
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.570785 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-5b76ffc8f4-6qc4h"
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.632327 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5b76ffc8f4-6qc4h"
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.657326 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7c966d6899-4tntc"
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.657973 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-7c966d6899-4tntc"
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.658603 4900 scope.go:117] "RemoveContainer" containerID="706172a72774f2fd5baa9552e1ce4a1b2672708b13b01f91fa0dc25b76c1de4f"
Jan 27 12:52:49 crc kubenswrapper[4900]: E0127 12:52:49.658958 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7c966d6899-4tntc_openstack(39372723-3fe3-4691-b1ed-16d971262f96)\"" pod="openstack/heat-cfnapi-7c966d6899-4tntc" podUID="39372723-3fe3-4691-b1ed-16d971262f96"
Jan 27 12:52:49 crc kubenswrapper[4900]: I0127 12:52:49.686551 4900 scope.go:117] "RemoveContainer" containerID="3dfff72645a7c79fb7b2dad52bfe3a82fb405d57e788e3228585b4a1ec3b718e"
Jan 27 12:52:49 crc kubenswrapper[4900]: E0127 12:52:49.686918 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5b76ffc8f4-6qc4h_openstack(33681235-9f03-4254-9658-5948d6a9db37)\"" pod="openstack/heat-api-5b76ffc8f4-6qc4h" podUID="33681235-9f03-4254-9658-5948d6a9db37"
Jan 27 12:52:50 crc kubenswrapper[4900]: I0127 12:52:50.206512 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nqj4r" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="registry-server" containerID="cri-o://352260c4d422dfb0d2c99d63e79f78e1be4da041df4b8f73c51499f307e02863" gracePeriod=2
Jan 27 12:52:50 crc kubenswrapper[4900]: I0127 12:52:50.206889 4900 scope.go:117] "RemoveContainer" containerID="706172a72774f2fd5baa9552e1ce4a1b2672708b13b01f91fa0dc25b76c1de4f"
Jan 27 12:52:50 crc kubenswrapper[4900]: E0127 12:52:50.207209 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7c966d6899-4tntc_openstack(39372723-3fe3-4691-b1ed-16d971262f96)\"" pod="openstack/heat-cfnapi-7c966d6899-4tntc" podUID="39372723-3fe3-4691-b1ed-16d971262f96"
Jan 27 12:52:50 crc kubenswrapper[4900]: I0127 12:52:50.210405 4900 scope.go:117] "RemoveContainer" containerID="3dfff72645a7c79fb7b2dad52bfe3a82fb405d57e788e3228585b4a1ec3b718e"
Jan 27 12:52:50 crc kubenswrapper[4900]: E0127 12:52:50.210929 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5b76ffc8f4-6qc4h_openstack(33681235-9f03-4254-9658-5948d6a9db37)\"" pod="openstack/heat-api-5b76ffc8f4-6qc4h" podUID="33681235-9f03-4254-9658-5948d6a9db37"
Jan 27 12:52:50 crc kubenswrapper[4900]: I0127 12:52:50.761091 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:52:50 crc kubenswrapper[4900]: I0127 12:52:50.799899 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 27 12:52:50 crc kubenswrapper[4900]: I0127 12:52:50.800684 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a5b99b87-204a-4232-a935-ca645a00f906" containerName="glance-log" containerID="cri-o://d08b67263191daaa6407ee3acf61da67d89289f11204efe1ffe8614bbbfc3b02" gracePeriod=30
Jan 27 12:52:50 crc kubenswrapper[4900]: I0127 12:52:50.801935 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a5b99b87-204a-4232-a935-ca645a00f906" containerName="glance-httpd" containerID="cri-o://7e03f4d7c487c359b2d259c5a1eb3d5d58c6cc71dafe5b9cd2d871f47f658af0" gracePeriod=30
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.226978 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sq822" event={"ID":"157a4f73-69ff-48a5-a97b-e78693201660","Type":"ContainerStarted","Data":"b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d"}
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.233018 4900 generic.go:334] "Generic (PLEG): container finished" podID="a5b99b87-204a-4232-a935-ca645a00f906" containerID="d08b67263191daaa6407ee3acf61da67d89289f11204efe1ffe8614bbbfc3b02" exitCode=143
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.233133 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b99b87-204a-4232-a935-ca645a00f906","Type":"ContainerDied","Data":"d08b67263191daaa6407ee3acf61da67d89289f11204efe1ffe8614bbbfc3b02"}
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.237986 4900 generic.go:334] "Generic (PLEG): container finished" podID="78573fe2-69fe-42f2-9196-304828739af1" containerID="352260c4d422dfb0d2c99d63e79f78e1be4da041df4b8f73c51499f307e02863" exitCode=0
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.238071 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nqj4r" event={"ID":"78573fe2-69fe-42f2-9196-304828739af1","Type":"ContainerDied","Data":"352260c4d422dfb0d2c99d63e79f78e1be4da041df4b8f73c51499f307e02863"}
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.244415 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerStarted","Data":"a7519d46fbca6565c0723aa989920192a7d3acdcc95e00147146b3471fa0a7e5"}
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.531535 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nqj4r"
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.563445 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sq822" podStartSLOduration=17.537450091 podStartE2EDuration="23.563423407s" podCreationTimestamp="2026-01-27 12:52:28 +0000 UTC" firstStartedPulling="2026-01-27 12:52:44.622405727 +0000 UTC m=+1591.859433937" lastFinishedPulling="2026-01-27 12:52:50.648379043 +0000 UTC m=+1597.885407253" observedRunningTime="2026-01-27 12:52:51.271544879 +0000 UTC m=+1598.508573089" watchObservedRunningTime="2026-01-27 12:52:51.563423407 +0000 UTC m=+1598.800451607"
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.711810 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-catalog-content\") pod \"78573fe2-69fe-42f2-9196-304828739af1\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") "
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.712487 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2c9gt\" (UniqueName: \"kubernetes.io/projected/78573fe2-69fe-42f2-9196-304828739af1-kube-api-access-2c9gt\") pod \"78573fe2-69fe-42f2-9196-304828739af1\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") "
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.712546 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-utilities\") pod \"78573fe2-69fe-42f2-9196-304828739af1\" (UID: \"78573fe2-69fe-42f2-9196-304828739af1\") "
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.713368 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-utilities" (OuterVolumeSpecName: "utilities") pod "78573fe2-69fe-42f2-9196-304828739af1" (UID: "78573fe2-69fe-42f2-9196-304828739af1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.713737 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.738143 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78573fe2-69fe-42f2-9196-304828739af1-kube-api-access-2c9gt" (OuterVolumeSpecName: "kube-api-access-2c9gt") pod "78573fe2-69fe-42f2-9196-304828739af1" (UID: "78573fe2-69fe-42f2-9196-304828739af1"). InnerVolumeSpecName "kube-api-access-2c9gt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.787903 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "78573fe2-69fe-42f2-9196-304828739af1" (UID: "78573fe2-69fe-42f2-9196-304828739af1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.816230 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2c9gt\" (UniqueName: \"kubernetes.io/projected/78573fe2-69fe-42f2-9196-304828739af1-kube-api-access-2c9gt\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:51 crc kubenswrapper[4900]: I0127 12:52:51.816281 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78573fe2-69fe-42f2-9196-304828739af1-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.266099 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nqj4r"
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.267319 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nqj4r" event={"ID":"78573fe2-69fe-42f2-9196-304828739af1","Type":"ContainerDied","Data":"dee8f4fc940b93d6acabd29df0ae07004aa7e3ba87a1e06f3a7a6d3f1fefe0ca"}
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.269818 4900 scope.go:117] "RemoveContainer" containerID="352260c4d422dfb0d2c99d63e79f78e1be4da041df4b8f73c51499f307e02863"
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.325901 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nqj4r"]
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.344313 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nqj4r"]
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.355219 4900 scope.go:117] "RemoveContainer" containerID="a1a91d635ddacbe8262aae3cbffbdc769453c520a7cedf73ab7fd5040ace1954"
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.373368 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.373428 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.373477 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x"
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.384950 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.385066 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon"
containerID="cri-o://4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" gracePeriod=600 Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.401207 4900 scope.go:117] "RemoveContainer" containerID="bc3f1d0846cf21c742949349e9446c182df389f47851790609f674e1605c3bcb" Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.496886 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78573fe2-69fe-42f2-9196-304828739af1" path="/var/lib/kubelet/pods/78573fe2-69fe-42f2-9196-304828739af1/volumes" Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.672278 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:52:52 crc kubenswrapper[4900]: I0127 12:52:52.781759 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5b76ffc8f4-6qc4h"] Jan 27 12:52:53 crc kubenswrapper[4900]: E0127 12:52:53.026598 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.303578 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerStarted","Data":"323487d1551365979d002a84ee731281d5a486b523c63d082300f405b63876cb"} Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.304455 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="ceilometer-central-agent" containerID="cri-o://2fca4dd6f4fa1e6ff8ae3544b3db5ac191bb6b626d0f77460386de3b51998409" gracePeriod=30 Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.304573 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.305471 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="proxy-httpd" containerID="cri-o://323487d1551365979d002a84ee731281d5a486b523c63d082300f405b63876cb" gracePeriod=30 Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.305548 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="sg-core" containerID="cri-o://a7519d46fbca6565c0723aa989920192a7d3acdcc95e00147146b3471fa0a7e5" gracePeriod=30 Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.305596 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="ceilometer-notification-agent" containerID="cri-o://c1678ebc3979a5407fafa350da2da2447438e4d5bba994578d21f3ae7d22dc06" gracePeriod=30 Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.361958 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.449017306 podStartE2EDuration="10.361929147s" podCreationTimestamp="2026-01-27 12:52:43 +0000 UTC" firstStartedPulling="2026-01-27 
12:52:45.667946536 +0000 UTC m=+1592.904974746" lastFinishedPulling="2026-01-27 12:52:52.580858377 +0000 UTC m=+1599.817886587" observedRunningTime="2026-01-27 12:52:53.32876036 +0000 UTC m=+1600.565788580" watchObservedRunningTime="2026-01-27 12:52:53.361929147 +0000 UTC m=+1600.598957357" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.379457 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" exitCode=0 Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.379718 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876"} Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.379772 4900 scope.go:117] "RemoveContainer" containerID="d3eb521560952eab8f11162cda8d03a25740b3b833254e8284177a101ff26343" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.381005 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:52:53 crc kubenswrapper[4900]: E0127 12:52:53.381578 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.384782 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.466716 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data-custom\") pod \"33681235-9f03-4254-9658-5948d6a9db37\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.466990 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data\") pod \"33681235-9f03-4254-9658-5948d6a9db37\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.467022 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trhxj\" (UniqueName: \"kubernetes.io/projected/33681235-9f03-4254-9658-5948d6a9db37-kube-api-access-trhxj\") pod \"33681235-9f03-4254-9658-5948d6a9db37\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.467075 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-combined-ca-bundle\") pod \"33681235-9f03-4254-9658-5948d6a9db37\" (UID: \"33681235-9f03-4254-9658-5948d6a9db37\") " Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.539877 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "33681235-9f03-4254-9658-5948d6a9db37" (UID: "33681235-9f03-4254-9658-5948d6a9db37"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.578368 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33681235-9f03-4254-9658-5948d6a9db37-kube-api-access-trhxj" (OuterVolumeSpecName: "kube-api-access-trhxj") pod "33681235-9f03-4254-9658-5948d6a9db37" (UID: "33681235-9f03-4254-9658-5948d6a9db37"). InnerVolumeSpecName "kube-api-access-trhxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.586286 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data" (OuterVolumeSpecName: "config-data") pod "33681235-9f03-4254-9658-5948d6a9db37" (UID: "33681235-9f03-4254-9658-5948d6a9db37"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.589880 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.589939 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trhxj\" (UniqueName: \"kubernetes.io/projected/33681235-9f03-4254-9658-5948d6a9db37-kube-api-access-trhxj\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.589957 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.612227 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33681235-9f03-4254-9658-5948d6a9db37" (UID: "33681235-9f03-4254-9658-5948d6a9db37"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:53 crc kubenswrapper[4900]: I0127 12:52:53.697991 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33681235-9f03-4254-9658-5948d6a9db37-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:54 crc kubenswrapper[4900]: E0127 12:52:54.030346 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3521fa03_6a1d_4bd6_90ed_2802291f5949.slice/crio-c1678ebc3979a5407fafa350da2da2447438e4d5bba994578d21f3ae7d22dc06.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3521fa03_6a1d_4bd6_90ed_2802291f5949.slice/crio-conmon-c1678ebc3979a5407fafa350da2da2447438e4d5bba994578d21f3ae7d22dc06.scope\": RecentStats: unable to find data in memory cache]" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.408945 4900 generic.go:334] "Generic (PLEG): container finished" podID="a5b99b87-204a-4232-a935-ca645a00f906" containerID="7e03f4d7c487c359b2d259c5a1eb3d5d58c6cc71dafe5b9cd2d871f47f658af0" exitCode=0 Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.409121 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b99b87-204a-4232-a935-ca645a00f906","Type":"ContainerDied","Data":"7e03f4d7c487c359b2d259c5a1eb3d5d58c6cc71dafe5b9cd2d871f47f658af0"} Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.421442 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5b76ffc8f4-6qc4h" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.421436 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5b76ffc8f4-6qc4h" event={"ID":"33681235-9f03-4254-9658-5948d6a9db37","Type":"ContainerDied","Data":"c4fb5c343603c064c6bbb05d35017d945ad9d3db807b7a038365d0ace97aaf56"} Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.421527 4900 scope.go:117] "RemoveContainer" containerID="3dfff72645a7c79fb7b2dad52bfe3a82fb405d57e788e3228585b4a1ec3b718e" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.479855 4900 generic.go:334] "Generic (PLEG): container finished" podID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerID="323487d1551365979d002a84ee731281d5a486b523c63d082300f405b63876cb" exitCode=0 Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.479917 4900 generic.go:334] "Generic (PLEG): container finished" podID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerID="a7519d46fbca6565c0723aa989920192a7d3acdcc95e00147146b3471fa0a7e5" exitCode=2 Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.479927 4900 generic.go:334] "Generic (PLEG): container finished" podID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerID="c1678ebc3979a5407fafa350da2da2447438e4d5bba994578d21f3ae7d22dc06" exitCode=0 Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.479972 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerDied","Data":"323487d1551365979d002a84ee731281d5a486b523c63d082300f405b63876cb"} Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.480007 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerDied","Data":"a7519d46fbca6565c0723aa989920192a7d3acdcc95e00147146b3471fa0a7e5"} Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.480021 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerDied","Data":"c1678ebc3979a5407fafa350da2da2447438e4d5bba994578d21f3ae7d22dc06"} Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.589740 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5b76ffc8f4-6qc4h"] Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.640076 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5b76ffc8f4-6qc4h"] Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.726936 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.836808 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"a5b99b87-204a-4232-a935-ca645a00f906\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.836937 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-public-tls-certs\") pod \"a5b99b87-204a-4232-a935-ca645a00f906\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.836999 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-logs\") pod \"a5b99b87-204a-4232-a935-ca645a00f906\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.837104 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhmgv\" (UniqueName: \"kubernetes.io/projected/a5b99b87-204a-4232-a935-ca645a00f906-kube-api-access-hhmgv\") pod \"a5b99b87-204a-4232-a935-ca645a00f906\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.837149 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-config-data\") pod \"a5b99b87-204a-4232-a935-ca645a00f906\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.837337 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-httpd-run\") pod \"a5b99b87-204a-4232-a935-ca645a00f906\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.837404 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-combined-ca-bundle\") pod \"a5b99b87-204a-4232-a935-ca645a00f906\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.837447 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-scripts\") pod \"a5b99b87-204a-4232-a935-ca645a00f906\" (UID: \"a5b99b87-204a-4232-a935-ca645a00f906\") " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.839621 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a5b99b87-204a-4232-a935-ca645a00f906" (UID: "a5b99b87-204a-4232-a935-ca645a00f906"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.839691 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-logs" (OuterVolumeSpecName: "logs") pod "a5b99b87-204a-4232-a935-ca645a00f906" (UID: "a5b99b87-204a-4232-a935-ca645a00f906"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.840536 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.840559 4900 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a5b99b87-204a-4232-a935-ca645a00f906-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.847362 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-scripts" (OuterVolumeSpecName: "scripts") pod "a5b99b87-204a-4232-a935-ca645a00f906" (UID: "a5b99b87-204a-4232-a935-ca645a00f906"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.866016 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5b99b87-204a-4232-a935-ca645a00f906-kube-api-access-hhmgv" (OuterVolumeSpecName: "kube-api-access-hhmgv") pod "a5b99b87-204a-4232-a935-ca645a00f906" (UID: "a5b99b87-204a-4232-a935-ca645a00f906"). InnerVolumeSpecName "kube-api-access-hhmgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.914633 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5b99b87-204a-4232-a935-ca645a00f906" (UID: "a5b99b87-204a-4232-a935-ca645a00f906"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.924218 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b" (OuterVolumeSpecName: "glance") pod "a5b99b87-204a-4232-a935-ca645a00f906" (UID: "a5b99b87-204a-4232-a935-ca645a00f906"). InnerVolumeSpecName "pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.947010 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") on node \"crc\" " Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.965474 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhmgv\" (UniqueName: \"kubernetes.io/projected/a5b99b87-204a-4232-a935-ca645a00f906-kube-api-access-hhmgv\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.965504 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:54 crc kubenswrapper[4900]: I0127 12:52:54.965520 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.023139 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-config-data" (OuterVolumeSpecName: "config-data") pod "a5b99b87-204a-4232-a935-ca645a00f906" (UID: "a5b99b87-204a-4232-a935-ca645a00f906"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.024876 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a5b99b87-204a-4232-a935-ca645a00f906" (UID: "a5b99b87-204a-4232-a935-ca645a00f906"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.033845 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.034047 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b") on node "crc" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.068137 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.068175 4900 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.068185 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5b99b87-204a-4232-a935-ca645a00f906-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.438050 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.438648 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-log" containerID="cri-o://a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3" gracePeriod=30 Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.438778 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-httpd" containerID="cri-o://8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d" gracePeriod=30 Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.558561 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"ded5eaa8-8d7d-4ee2-bad6-62da18024e33","Type":"ContainerStarted","Data":"bc00b7194af2191caa268b4307ce98f36052685dbab839639b3e18ac7d303c8c"} Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.597590 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a5b99b87-204a-4232-a935-ca645a00f906","Type":"ContainerDied","Data":"a8c98ad8358c5bc0ae03d32179d2006e5e6d6c52bd2048141e20c057c18ff0d9"} Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.597788 4900 scope.go:117] "RemoveContainer" containerID="7e03f4d7c487c359b2d259c5a1eb3d5d58c6cc71dafe5b9cd2d871f47f658af0" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.598230 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.600730 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.411691988 podStartE2EDuration="41.600696039s" podCreationTimestamp="2026-01-27 12:52:14 +0000 UTC" firstStartedPulling="2026-01-27 12:52:15.848376927 +0000 UTC m=+1563.085405137" lastFinishedPulling="2026-01-27 12:52:55.037380978 +0000 UTC m=+1602.274409188" observedRunningTime="2026-01-27 12:52:55.582268212 +0000 UTC m=+1602.819296432" watchObservedRunningTime="2026-01-27 12:52:55.600696039 +0000 UTC m=+1602.837724249" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.656678 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.788876 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7c966d6899-4tntc"] Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.879073 4900 scope.go:117] "RemoveContainer" containerID="d08b67263191daaa6407ee3acf61da67d89289f11204efe1ffe8614bbbfc3b02" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.938542 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.964229 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.984167 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:52:55 crc kubenswrapper[4900]: E0127 12:52:55.985313 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33681235-9f03-4254-9658-5948d6a9db37" containerName="heat-api" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.985337 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33681235-9f03-4254-9658-5948d6a9db37" containerName="heat-api" Jan 27 12:52:55 crc kubenswrapper[4900]: E0127 12:52:55.985392 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33681235-9f03-4254-9658-5948d6a9db37" containerName="heat-api" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.985402 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="33681235-9f03-4254-9658-5948d6a9db37" containerName="heat-api" Jan 27 12:52:55 crc kubenswrapper[4900]: E0127 12:52:55.985425 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b99b87-204a-4232-a935-ca645a00f906" containerName="glance-httpd" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.985434 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b99b87-204a-4232-a935-ca645a00f906" containerName="glance-httpd" Jan 27 12:52:55 crc kubenswrapper[4900]: E0127 12:52:55.985464 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="extract-utilities" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.985473 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="extract-utilities" Jan 27 12:52:55 crc kubenswrapper[4900]: E0127 12:52:55.985489 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="extract-content" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.985497 4900 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="extract-content" Jan 27 12:52:55 crc kubenswrapper[4900]: E0127 12:52:55.985521 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="registry-server" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.985529 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="registry-server" Jan 27 12:52:55 crc kubenswrapper[4900]: E0127 12:52:55.985545 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b99b87-204a-4232-a935-ca645a00f906" containerName="glance-log" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.985556 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b99b87-204a-4232-a935-ca645a00f906" containerName="glance-log" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.986014 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="33681235-9f03-4254-9658-5948d6a9db37" containerName="heat-api" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.986042 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5b99b87-204a-4232-a935-ca645a00f906" containerName="glance-log" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.986080 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="78573fe2-69fe-42f2-9196-304828739af1" containerName="registry-server" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.986101 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5b99b87-204a-4232-a935-ca645a00f906" containerName="glance-httpd" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.986780 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="33681235-9f03-4254-9658-5948d6a9db37" containerName="heat-api" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.988400 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.999341 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 27 12:52:55 crc kubenswrapper[4900]: I0127 12:52:55.999436 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.018103 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.159398 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-scripts\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.159820 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.159885 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-config-data\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.159913 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.159959 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.164846 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.165149 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pjc6\" (UniqueName: \"kubernetes.io/projected/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-kube-api-access-6pjc6\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.165188 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-logs\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.267036 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-config-data\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.267103 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.267148 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.267205 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.267286 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pjc6\" (UniqueName: \"kubernetes.io/projected/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-kube-api-access-6pjc6\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.267307 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-logs\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.267399 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-scripts\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.267436 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.268908 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.269856 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-logs\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.281196 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.281251 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/95185e520e2875ca4693f40e4e3857c43a9e41c071319588428e729bb9badc1a/globalmount\"" pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.287573 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-scripts\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.288476 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-config-data\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.290916 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.304258 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.308214 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pjc6\" (UniqueName: \"kubernetes.io/projected/98607883-1bf0-41e4-a9c1-f41e3d4cf5de-kube-api-access-6pjc6\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.385586 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8d749dfa-a0fe-4415-b40f-1fd970f6ec8b\") pod \"glance-default-external-api-0\" (UID: \"98607883-1bf0-41e4-a9c1-f41e3d4cf5de\") " pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.474639 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.521131 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33681235-9f03-4254-9658-5948d6a9db37" path="/var/lib/kubelet/pods/33681235-9f03-4254-9658-5948d6a9db37/volumes" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.522012 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5b99b87-204a-4232-a935-ca645a00f906" path="/var/lib/kubelet/pods/a5b99b87-204a-4232-a935-ca645a00f906/volumes" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.574458 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data\") pod \"39372723-3fe3-4691-b1ed-16d971262f96\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.574655 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data-custom\") pod \"39372723-3fe3-4691-b1ed-16d971262f96\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.574825 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-combined-ca-bundle\") pod \"39372723-3fe3-4691-b1ed-16d971262f96\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.574909 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brb4f\" (UniqueName: \"kubernetes.io/projected/39372723-3fe3-4691-b1ed-16d971262f96-kube-api-access-brb4f\") pod \"39372723-3fe3-4691-b1ed-16d971262f96\" (UID: \"39372723-3fe3-4691-b1ed-16d971262f96\") " Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.579626 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "39372723-3fe3-4691-b1ed-16d971262f96" (UID: "39372723-3fe3-4691-b1ed-16d971262f96"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.599632 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39372723-3fe3-4691-b1ed-16d971262f96-kube-api-access-brb4f" (OuterVolumeSpecName: "kube-api-access-brb4f") pod "39372723-3fe3-4691-b1ed-16d971262f96" (UID: "39372723-3fe3-4691-b1ed-16d971262f96"). InnerVolumeSpecName "kube-api-access-brb4f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.622519 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c966d6899-4tntc" event={"ID":"39372723-3fe3-4691-b1ed-16d971262f96","Type":"ContainerDied","Data":"2b597b41daf97691aa4b8f07b6e916b90314c06e8cf47454db039df0e6458003"} Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.622578 4900 scope.go:117] "RemoveContainer" containerID="706172a72774f2fd5baa9552e1ce4a1b2672708b13b01f91fa0dc25b76c1de4f" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.622649 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7c966d6899-4tntc" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.637286 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "39372723-3fe3-4691-b1ed-16d971262f96" (UID: "39372723-3fe3-4691-b1ed-16d971262f96"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.639138 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.643932 4900 generic.go:334] "Generic (PLEG): container finished" podID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerID="a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3" exitCode=143 Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.644004 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0be8476d-4715-4bb9-80da-51be3e2a13f5","Type":"ContainerDied","Data":"a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3"} Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.666380 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data" (OuterVolumeSpecName: "config-data") pod "39372723-3fe3-4691-b1ed-16d971262f96" (UID: "39372723-3fe3-4691-b1ed-16d971262f96"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.689963 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.690009 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brb4f\" (UniqueName: \"kubernetes.io/projected/39372723-3fe3-4691-b1ed-16d971262f96-kube-api-access-brb4f\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.690023 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:56 crc kubenswrapper[4900]: I0127 12:52:56.690033 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39372723-3fe3-4691-b1ed-16d971262f96-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:57 crc kubenswrapper[4900]: I0127 12:52:57.021517 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7c966d6899-4tntc"] Jan 27 12:52:57 crc kubenswrapper[4900]: I0127 12:52:57.051860 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7c966d6899-4tntc"] Jan 27 12:52:57 crc kubenswrapper[4900]: I0127 12:52:57.350529 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 27 12:52:57 crc kubenswrapper[4900]: I0127 12:52:57.667132 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"98607883-1bf0-41e4-a9c1-f41e3d4cf5de","Type":"ContainerStarted","Data":"4c06387523bce5f1a10c9d929f99f541644a6425c5f7fc93027b3940d90cf56d"} Jan 27 12:52:58 crc kubenswrapper[4900]: I0127 12:52:58.184444 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="49ffda39-1561-49d2-a67d-ad7da16103b2" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.226:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 12:52:58 crc kubenswrapper[4900]: I0127 12:52:58.500397 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39372723-3fe3-4691-b1ed-16d971262f96" path="/var/lib/kubelet/pods/39372723-3fe3-4691-b1ed-16d971262f96/volumes" Jan 27 12:52:58 crc kubenswrapper[4900]: I0127 12:52:58.684482 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"98607883-1bf0-41e4-a9c1-f41e3d4cf5de","Type":"ContainerStarted","Data":"ff2bc695bc5dce9515cd4fd7ec7e5b5f204070ba6999aa87120028f832fbf119"} Jan 27 12:52:58 crc kubenswrapper[4900]: I0127 12:52:58.701718 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 27 12:52:58 crc kubenswrapper[4900]: I0127 12:52:58.759021 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.188:9292/healthcheck\": read tcp 10.217.0.2:37946->10.217.0.188:9292: read: connection reset by peer" Jan 27 12:52:58 crc kubenswrapper[4900]: I0127 12:52:58.759081 4900 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.188:9292/healthcheck\": read tcp 10.217.0.2:37950->10.217.0.188:9292: read: connection reset by peer" Jan 27 12:52:58 crc kubenswrapper[4900]: I0127 12:52:58.964690 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:58 crc kubenswrapper[4900]: I0127 12:52:58.965086 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.591850 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.603113 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.707483 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-78cfbd797c-pm2mm"] Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.708702 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-78cfbd797c-pm2mm" podUID="c23e8577-70e1-4e21-a841-6c34251756f7" containerName="heat-engine" containerID="cri-o://2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017" gracePeriod=60 Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.715522 4900 generic.go:334] "Generic (PLEG): container finished" podID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerID="8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d" exitCode=0 Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.715607 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0be8476d-4715-4bb9-80da-51be3e2a13f5","Type":"ContainerDied","Data":"8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d"} Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.715640 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0be8476d-4715-4bb9-80da-51be3e2a13f5","Type":"ContainerDied","Data":"288894f3d127df23197c00b8967b257b7a3e9d73a8bd6c2a8c05334a6b23857e"} Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.715658 4900 scope.go:117] "RemoveContainer" containerID="8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.715864 4900 util.go:48] "No ready sandbox for pod can be found. 
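[editor's note] The Liveness probe failure above reports "net/http: request canceled (Client.Timeout exceeded while awaiting headers)", which is the standard error an http.Client returns when its Timeout elapses before response headers arrive. A minimal stdlib Go sketch of that failure mode follows; it is illustrative only (not kubelet's prober code), and the local URL stands in for the pod healthcheck endpoint:

    // probe_sketch.go: illustrative only -- not kubelet source.
    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	// 1s stands in for the probe's timeoutSeconds.
    	client := &http.Client{Timeout: 1 * time.Second}
    	// Hypothetical endpoint standing in for https://10.217.0.226:8776/healthcheck.
    	resp, err := client.Get("http://127.0.0.1:8776/healthcheck")
    	if err != nil {
    		// A slow server yields "... (Client.Timeout exceeded while awaiting headers)".
    		fmt.Println("probe failed:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("probe result:", resp.Status)
    }
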
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.733946 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"98607883-1bf0-41e4-a9c1-f41e3d4cf5de","Type":"ContainerStarted","Data":"06afe9a6d01796f01b94db294ab8c7314dd3d2c30966df5dbe88a6f397269e76"} Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.752767 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f84rj\" (UniqueName: \"kubernetes.io/projected/0be8476d-4715-4bb9-80da-51be3e2a13f5-kube-api-access-f84rj\") pod \"0be8476d-4715-4bb9-80da-51be3e2a13f5\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.752903 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-config-data\") pod \"0be8476d-4715-4bb9-80da-51be3e2a13f5\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.752976 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-httpd-run\") pod \"0be8476d-4715-4bb9-80da-51be3e2a13f5\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.753049 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-logs\") pod \"0be8476d-4715-4bb9-80da-51be3e2a13f5\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.753115 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-combined-ca-bundle\") pod \"0be8476d-4715-4bb9-80da-51be3e2a13f5\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.753167 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-internal-tls-certs\") pod \"0be8476d-4715-4bb9-80da-51be3e2a13f5\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.753287 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-scripts\") pod \"0be8476d-4715-4bb9-80da-51be3e2a13f5\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.753884 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"0be8476d-4715-4bb9-80da-51be3e2a13f5\" (UID: \"0be8476d-4715-4bb9-80da-51be3e2a13f5\") " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.753940 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-logs" (OuterVolumeSpecName: "logs") pod "0be8476d-4715-4bb9-80da-51be3e2a13f5" (UID: "0be8476d-4715-4bb9-80da-51be3e2a13f5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.754858 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.755251 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0be8476d-4715-4bb9-80da-51be3e2a13f5" (UID: "0be8476d-4715-4bb9-80da-51be3e2a13f5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.763019 4900 scope.go:117] "RemoveContainer" containerID="a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.763127 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0be8476d-4715-4bb9-80da-51be3e2a13f5-kube-api-access-f84rj" (OuterVolumeSpecName: "kube-api-access-f84rj") pod "0be8476d-4715-4bb9-80da-51be3e2a13f5" (UID: "0be8476d-4715-4bb9-80da-51be3e2a13f5"). InnerVolumeSpecName "kube-api-access-f84rj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.779199 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-scripts" (OuterVolumeSpecName: "scripts") pod "0be8476d-4715-4bb9-80da-51be3e2a13f5" (UID: "0be8476d-4715-4bb9-80da-51be3e2a13f5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.804199 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.804179517 podStartE2EDuration="4.804179517s" podCreationTimestamp="2026-01-27 12:52:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:52:59.796627096 +0000 UTC m=+1607.033655306" watchObservedRunningTime="2026-01-27 12:52:59.804179517 +0000 UTC m=+1607.041207727" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.811633 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33" (OuterVolumeSpecName: "glance") pod "0be8476d-4715-4bb9-80da-51be3e2a13f5" (UID: "0be8476d-4715-4bb9-80da-51be3e2a13f5"). InnerVolumeSpecName "pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.821219 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0be8476d-4715-4bb9-80da-51be3e2a13f5" (UID: "0be8476d-4715-4bb9-80da-51be3e2a13f5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.864034 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.868137 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.868396 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") on node \"crc\" " Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.868510 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f84rj\" (UniqueName: \"kubernetes.io/projected/0be8476d-4715-4bb9-80da-51be3e2a13f5-kube-api-access-f84rj\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.868640 4900 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0be8476d-4715-4bb9-80da-51be3e2a13f5-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.889952 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0be8476d-4715-4bb9-80da-51be3e2a13f5" (UID: "0be8476d-4715-4bb9-80da-51be3e2a13f5"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.964664 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-config-data" (OuterVolumeSpecName: "config-data") pod "0be8476d-4715-4bb9-80da-51be3e2a13f5" (UID: "0be8476d-4715-4bb9-80da-51be3e2a13f5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.965085 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.965325 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33") on node "crc" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.976171 4900 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.976213 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") on node \"crc\" DevicePath \"\"" Jan 27 12:52:59 crc kubenswrapper[4900]: I0127 12:52:59.976225 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be8476d-4715-4bb9-80da-51be3e2a13f5-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.063664 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-sq822" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="registry-server" probeResult="failure" output=< Jan 27 12:53:00 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:53:00 crc kubenswrapper[4900]: > Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.072351 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.091499 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.115097 4900 scope.go:117] "RemoveContainer" containerID="8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.118553 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:53:00 crc kubenswrapper[4900]: E0127 12:53:00.119480 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39372723-3fe3-4691-b1ed-16d971262f96" containerName="heat-cfnapi" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.123535 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="39372723-3fe3-4691-b1ed-16d971262f96" containerName="heat-cfnapi" Jan 27 12:53:00 crc kubenswrapper[4900]: E0127 12:53:00.123671 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39372723-3fe3-4691-b1ed-16d971262f96" containerName="heat-cfnapi" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.123781 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="39372723-3fe3-4691-b1ed-16d971262f96" containerName="heat-cfnapi" Jan 27 12:53:00 crc kubenswrapper[4900]: E0127 12:53:00.123904 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-httpd" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.123986 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-httpd" Jan 27 12:53:00 crc kubenswrapper[4900]: E0127 12:53:00.124100 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
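[editor's note] The Startup probe output above ("timeout: failed to connect service \":50051\" within 1s") is the health-probe's way of saying the registry-server's gRPC port never accepted a connection inside the probe window. A stdlib Go sketch (illustrative only; the address is a stand-in) of a dial with a hard 1s deadline that produces that class of failure:

    // dial_sketch.go: illustrative only -- not the probe binary.
    package main

    import (
    	"fmt"
    	"net"
    	"time"
    )

    func main() {
    	conn, err := net.DialTimeout("tcp", "127.0.0.1:50051", 1*time.Second)
    	if err != nil {
    		fmt.Printf("timeout: failed to connect service %q within 1s: %v\n", ":50051", err)
    		return
    	}
    	conn.Close()
    	fmt.Println("service is accepting connections")
    }
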
podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-log" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.124180 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-log" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.124607 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-httpd" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.124696 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="39372723-3fe3-4691-b1ed-16d971262f96" containerName="heat-cfnapi" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.124758 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" containerName="glance-log" Jan 27 12:53:00 crc kubenswrapper[4900]: E0127 12:53:00.121266 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d\": container with ID starting with 8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d not found: ID does not exist" containerID="8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.125381 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d"} err="failed to get container status \"8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d\": rpc error: code = NotFound desc = could not find container \"8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d\": container with ID starting with 8c8f2538bbe8cb65c92e9242b91b79b48cb497ead1d707f269c972c9d1fe305d not found: ID does not exist" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.125424 4900 scope.go:117] "RemoveContainer" containerID="a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.126417 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="39372723-3fe3-4691-b1ed-16d971262f96" containerName="heat-cfnapi" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.127596 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: E0127 12:53:00.129299 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3\": container with ID starting with a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3 not found: ID does not exist" containerID="a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.129347 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3"} err="failed to get container status \"a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3\": rpc error: code = NotFound desc = could not find container \"a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3\": container with ID starting with a11533ea663b5a9f4b59c504426773b53f0340a96987b0693afa908b552664a3 not found: ID does not exist" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.130101 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.140018 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.150471 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.286211 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.286421 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.286738 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-scripts\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.286911 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21602597-32ce-4d1d-8215-951fd259bc77-logs\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.287075 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-427lv\" (UniqueName: 
\"kubernetes.io/projected/21602597-32ce-4d1d-8215-951fd259bc77-kube-api-access-427lv\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.287219 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.287443 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-config-data\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.287538 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/21602597-32ce-4d1d-8215-951fd259bc77-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.389922 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-config-data\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.390329 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/21602597-32ce-4d1d-8215-951fd259bc77-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.390419 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.390460 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.390552 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-scripts\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.390616 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/21602597-32ce-4d1d-8215-951fd259bc77-logs\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.390666 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-427lv\" (UniqueName: \"kubernetes.io/projected/21602597-32ce-4d1d-8215-951fd259bc77-kube-api-access-427lv\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.390713 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.391272 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/21602597-32ce-4d1d-8215-951fd259bc77-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.391376 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/21602597-32ce-4d1d-8215-951fd259bc77-logs\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.401173 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0" Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.401750 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.401790 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/4b03b1e0d87c93578458a398975bf619ff063e17c81725aa528a6bf239df8b6c/globalmount\"" pod="openstack/glance-default-internal-api-0"
Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.411152 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.419851 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-config-data\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.420941 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21602597-32ce-4d1d-8215-951fd259bc77-scripts\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.435894 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-427lv\" (UniqueName: \"kubernetes.io/projected/21602597-32ce-4d1d-8215-951fd259bc77-kube-api-access-427lv\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.487607 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d451ee09-5d98-42c1-92c0-93c1f575ae33\") pod \"glance-default-internal-api-0\" (UID: \"21602597-32ce-4d1d-8215-951fd259bc77\") " pod="openstack/glance-default-internal-api-0"
Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.562223 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0be8476d-4715-4bb9-80da-51be3e2a13f5" path="/var/lib/kubelet/pods/0be8476d-4715-4bb9-80da-51be3e2a13f5/volumes"
Jan 27 12:53:00 crc kubenswrapper[4900]: I0127 12:53:00.767199 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 27 12:53:01 crc kubenswrapper[4900]: I0127 12:53:01.334438 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 27 12:53:01 crc kubenswrapper[4900]: I0127 12:53:01.815035 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"21602597-32ce-4d1d-8215-951fd259bc77","Type":"ContainerStarted","Data":"982cad2d7098ea43465efcc162597cf86c6cdc0263cf816fd9d5673b06f62121"}
Jan 27 12:53:02 crc kubenswrapper[4900]: I0127 12:53:02.839544 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"21602597-32ce-4d1d-8215-951fd259bc77","Type":"ContainerStarted","Data":"5d5003e9c2d91f027e9d9ed471467837f15018a826430a12d2b2c36d8b15e10e"}
Jan 27 12:53:03 crc kubenswrapper[4900]: I0127 12:53:03.883951 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"21602597-32ce-4d1d-8215-951fd259bc77","Type":"ContainerStarted","Data":"5ebb11742f7a4e917b3ce64a7a7875c9b04770ab8b3a35c51f3c50959ff91fb6"}
Jan 27 12:53:03 crc kubenswrapper[4900]: I0127 12:53:03.927466 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.927435935 podStartE2EDuration="3.927435935s" podCreationTimestamp="2026-01-27 12:53:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:53:03.907204705 +0000 UTC m=+1611.144232915" watchObservedRunningTime="2026-01-27 12:53:03.927435935 +0000 UTC m=+1611.164464155"
Jan 27 12:53:06 crc kubenswrapper[4900]: I0127 12:53:06.642431 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 27 12:53:06 crc kubenswrapper[4900]: I0127 12:53:06.643015 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 27 12:53:06 crc kubenswrapper[4900]: I0127 12:53:06.686102 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 27 12:53:06 crc kubenswrapper[4900]: I0127 12:53:06.694687 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 27 12:53:06 crc kubenswrapper[4900]: I0127 12:53:06.948669 4900 generic.go:334] "Generic (PLEG): container finished" podID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerID="2fca4dd6f4fa1e6ff8ae3544b3db5ac191bb6b626d0f77460386de3b51998409" exitCode=0
Jan 27 12:53:06 crc kubenswrapper[4900]: I0127 12:53:06.951494 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerDied","Data":"2fca4dd6f4fa1e6ff8ae3544b3db5ac191bb6b626d0f77460386de3b51998409"}
Jan 27 12:53:06 crc kubenswrapper[4900]: I0127 12:53:06.951537 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 27 12:53:06 crc kubenswrapper[4900]: I0127 12:53:06.951677 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.369191 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3521fa03-6a1d-4bd6-90ed-2802291f5949" (UID: "3521fa03-6a1d-4bd6-90ed-2802291f5949"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.369293 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-run-httpd\") pod \"3521fa03-6a1d-4bd6-90ed-2802291f5949\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.369589 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-combined-ca-bundle\") pod \"3521fa03-6a1d-4bd6-90ed-2802291f5949\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.369650 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-scripts\") pod \"3521fa03-6a1d-4bd6-90ed-2802291f5949\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.369903 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrss8\" (UniqueName: \"kubernetes.io/projected/3521fa03-6a1d-4bd6-90ed-2802291f5949-kube-api-access-rrss8\") pod \"3521fa03-6a1d-4bd6-90ed-2802291f5949\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.370089 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-log-httpd\") pod \"3521fa03-6a1d-4bd6-90ed-2802291f5949\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.370283 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-sg-core-conf-yaml\") pod \"3521fa03-6a1d-4bd6-90ed-2802291f5949\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.370329 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-config-data\") pod \"3521fa03-6a1d-4bd6-90ed-2802291f5949\" (UID: \"3521fa03-6a1d-4bd6-90ed-2802291f5949\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.371373 4900 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.371447 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3521fa03-6a1d-4bd6-90ed-2802291f5949" (UID: "3521fa03-6a1d-4bd6-90ed-2802291f5949"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.376768 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-scripts" (OuterVolumeSpecName: "scripts") pod "3521fa03-6a1d-4bd6-90ed-2802291f5949" (UID: "3521fa03-6a1d-4bd6-90ed-2802291f5949"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.379295 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3521fa03-6a1d-4bd6-90ed-2802291f5949-kube-api-access-rrss8" (OuterVolumeSpecName: "kube-api-access-rrss8") pod "3521fa03-6a1d-4bd6-90ed-2802291f5949" (UID: "3521fa03-6a1d-4bd6-90ed-2802291f5949"). InnerVolumeSpecName "kube-api-access-rrss8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.416267 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3521fa03-6a1d-4bd6-90ed-2802291f5949" (UID: "3521fa03-6a1d-4bd6-90ed-2802291f5949"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.474724 4900 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.475083 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.475216 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrss8\" (UniqueName: \"kubernetes.io/projected/3521fa03-6a1d-4bd6-90ed-2802291f5949-kube-api-access-rrss8\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.475276 4900 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3521fa03-6a1d-4bd6-90ed-2802291f5949-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.516418 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3521fa03-6a1d-4bd6-90ed-2802291f5949" (UID: "3521fa03-6a1d-4bd6-90ed-2802291f5949"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.554279 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-config-data" (OuterVolumeSpecName: "config-data") pod "3521fa03-6a1d-4bd6-90ed-2802291f5949" (UID: "3521fa03-6a1d-4bd6-90ed-2802291f5949"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.577564 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.577611 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3521fa03-6a1d-4bd6-90ed-2802291f5949-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.806695 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.883318 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data-custom\") pod \"c23e8577-70e1-4e21-a841-6c34251756f7\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.883487 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data\") pod \"c23e8577-70e1-4e21-a841-6c34251756f7\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.883575 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-combined-ca-bundle\") pod \"c23e8577-70e1-4e21-a841-6c34251756f7\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.883677 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vcm8g\" (UniqueName: \"kubernetes.io/projected/c23e8577-70e1-4e21-a841-6c34251756f7-kube-api-access-vcm8g\") pod \"c23e8577-70e1-4e21-a841-6c34251756f7\" (UID: \"c23e8577-70e1-4e21-a841-6c34251756f7\") " Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.888165 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c23e8577-70e1-4e21-a841-6c34251756f7-kube-api-access-vcm8g" (OuterVolumeSpecName: "kube-api-access-vcm8g") pod "c23e8577-70e1-4e21-a841-6c34251756f7" (UID: "c23e8577-70e1-4e21-a841-6c34251756f7"). InnerVolumeSpecName "kube-api-access-vcm8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.890896 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c23e8577-70e1-4e21-a841-6c34251756f7" (UID: "c23e8577-70e1-4e21-a841-6c34251756f7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.922165 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c23e8577-70e1-4e21-a841-6c34251756f7" (UID: "c23e8577-70e1-4e21-a841-6c34251756f7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.964771 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data" (OuterVolumeSpecName: "config-data") pod "c23e8577-70e1-4e21-a841-6c34251756f7" (UID: "c23e8577-70e1-4e21-a841-6c34251756f7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.968972 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3521fa03-6a1d-4bd6-90ed-2802291f5949","Type":"ContainerDied","Data":"c213da95e4177165306861d8da8540854ee1794b06e36253ba67779344d16b61"} Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.969039 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.969093 4900 scope.go:117] "RemoveContainer" containerID="323487d1551365979d002a84ee731281d5a486b523c63d082300f405b63876cb" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.972749 4900 generic.go:334] "Generic (PLEG): container finished" podID="c23e8577-70e1-4e21-a841-6c34251756f7" containerID="2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017" exitCode=0 Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.972839 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-78cfbd797c-pm2mm" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.972904 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78cfbd797c-pm2mm" event={"ID":"c23e8577-70e1-4e21-a841-6c34251756f7","Type":"ContainerDied","Data":"2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017"} Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.972945 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78cfbd797c-pm2mm" event={"ID":"c23e8577-70e1-4e21-a841-6c34251756f7","Type":"ContainerDied","Data":"7d096827b0b4bab5e06039d470d7968bb32506e256adb95e5d8c95ed49fcd8d9"} Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.985968 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.985998 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.986008 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c23e8577-70e1-4e21-a841-6c34251756f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:07 crc kubenswrapper[4900]: I0127 12:53:07.986022 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vcm8g\" (UniqueName: \"kubernetes.io/projected/c23e8577-70e1-4e21-a841-6c34251756f7-kube-api-access-vcm8g\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.127224 4900 scope.go:117] "RemoveContainer" containerID="a7519d46fbca6565c0723aa989920192a7d3acdcc95e00147146b3471fa0a7e5" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.138644 4900 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.150312 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.173769 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-78cfbd797c-pm2mm"] Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.177249 4900 scope.go:117] "RemoveContainer" containerID="c1678ebc3979a5407fafa350da2da2447438e4d5bba994578d21f3ae7d22dc06" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.190292 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-78cfbd797c-pm2mm"] Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.212146 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:08 crc kubenswrapper[4900]: E0127 12:53:08.212952 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="sg-core" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.212982 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="sg-core" Jan 27 12:53:08 crc kubenswrapper[4900]: E0127 12:53:08.212998 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="ceilometer-notification-agent" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213008 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="ceilometer-notification-agent" Jan 27 12:53:08 crc kubenswrapper[4900]: E0127 12:53:08.213043 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="ceilometer-central-agent" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213077 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="ceilometer-central-agent" Jan 27 12:53:08 crc kubenswrapper[4900]: E0127 12:53:08.213097 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="proxy-httpd" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213107 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="proxy-httpd" Jan 27 12:53:08 crc kubenswrapper[4900]: E0127 12:53:08.213136 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c23e8577-70e1-4e21-a841-6c34251756f7" containerName="heat-engine" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213144 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c23e8577-70e1-4e21-a841-6c34251756f7" containerName="heat-engine" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213470 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="ceilometer-central-agent" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213505 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="proxy-httpd" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213526 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="ceilometer-notification-agent" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213543 
4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" containerName="sg-core" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.213559 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="c23e8577-70e1-4e21-a841-6c34251756f7" containerName="heat-engine" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.216630 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.221263 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.226791 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.229523 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.245187 4900 scope.go:117] "RemoveContainer" containerID="2fca4dd6f4fa1e6ff8ae3544b3db5ac191bb6b626d0f77460386de3b51998409" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.332618 4900 scope.go:117] "RemoveContainer" containerID="2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.398429 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.398483 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbgd6\" (UniqueName: \"kubernetes.io/projected/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-kube-api-access-kbgd6\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.398532 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-log-httpd\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.398617 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.399022 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-config-data\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.399235 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-scripts\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " 
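[editor's note] The cpu_manager/state_mem/memory_manager block above shows RemoveStaleState dropping per-container CPU and memory assignments whose pods (the old ceilometer-0 and heat-engine UIDs) no longer exist. A toy Go sketch of that cleanup pattern (illustrative only; a plain map stands in for kubelet's checkpointed state):

    // stalestate_sketch.go: illustrative only -- toy stale-state sweep.
    package main

    import "fmt"

    type key struct{ podUID, container string }

    func removeStaleState(assignments map[key]string, activePods map[string]bool) {
    	for k := range assignments {
    		if !activePods[k.podUID] {
    			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.container)
    			delete(assignments, k)
    			fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n", k.podUID, k.container)
    		}
    	}
    }

    func main() {
    	assignments := map[key]string{
    		{podUID: "3521fa03-6a1d-4bd6-90ed-2802291f5949", container: "sg-core"}: "0-3",
    	}
    	removeStaleState(assignments, map[string]bool{}) // the old pod UID is gone
    }
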
pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.399301 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-run-httpd\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.462835 4900 scope.go:117] "RemoveContainer" containerID="2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017" Jan 27 12:53:08 crc kubenswrapper[4900]: E0127 12:53:08.469669 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017\": container with ID starting with 2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017 not found: ID does not exist" containerID="2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.469727 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017"} err="failed to get container status \"2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017\": rpc error: code = NotFound desc = could not find container \"2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017\": container with ID starting with 2def81c69cc67d433add5bf662b4a8aa7fa813aaf1c30023faacb90dfb792017 not found: ID does not exist" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.482099 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:53:08 crc kubenswrapper[4900]: E0127 12:53:08.482378 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.497909 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3521fa03-6a1d-4bd6-90ed-2802291f5949" path="/var/lib/kubelet/pods/3521fa03-6a1d-4bd6-90ed-2802291f5949/volumes" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.498760 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c23e8577-70e1-4e21-a841-6c34251756f7" path="/var/lib/kubelet/pods/c23e8577-70e1-4e21-a841-6c34251756f7/volumes" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.501699 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-config-data\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.503450 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-scripts\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.504076 
4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-run-httpd\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.504154 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.504195 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbgd6\" (UniqueName: \"kubernetes.io/projected/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-kube-api-access-kbgd6\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.504254 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-log-httpd\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.504393 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.504660 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-run-httpd\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.504713 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-log-httpd\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.507890 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-scripts\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.508688 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.508776 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.509524 4900 operation_generator.go:637] "MountVolume.SetUp 
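
The MountVolume.SetUp lines and the "Cleaned up orphaned pod volumes dir" lines above all point at the same on-disk layout under /var/lib/kubelet/pods/<podUID>/volumes. A small sketch reconstructing that path; the plugin-name escaping (kubernetes.io/empty-dir stored as kubernetes.io~empty-dir) is the standard kubelet convention, assumed here rather than read from this log:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// podVolumeDir builds the per-pod volume directory the reconciler lines
// refer to: /var/lib/kubelet/pods/<podUID>/volumes/<escaped plugin>/<name>.
func podVolumeDir(podUID, pluginName, volumeName string) string {
	escaped := strings.ReplaceAll(pluginName, "/", "~")
	return filepath.Join("/var/lib/kubelet/pods", podUID, "volumes", escaped, volumeName)
}

func main() {
	fmt.Println(podVolumeDir(
		"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1",
		"kubernetes.io/empty-dir",
		"run-httpd",
	))
	// /var/lib/kubelet/pods/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1/volumes/kubernetes.io~empty-dir/run-httpd
}

Keying everything on the pod UID is what lets the orphan cleanup at 12:53:08.497909 remove all of a deleted pod's volumes by deleting one directory.
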
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-config-data\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.524606 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbgd6\" (UniqueName: \"kubernetes.io/projected/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-kube-api-access-kbgd6\") pod \"ceilometer-0\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.547295 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.990302 4900 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 12:53:08 crc kubenswrapper[4900]: I0127 12:53:08.990613 4900 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 12:53:09 crc kubenswrapper[4900]: I0127 12:53:09.061187 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:53:09 crc kubenswrapper[4900]: I0127 12:53:09.113160 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:09 crc kubenswrapper[4900]: I0127 12:53:09.153194 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:53:09 crc kubenswrapper[4900]: I0127 12:53:09.322820 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sq822"] Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.005886 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerStarted","Data":"2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e"} Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.006573 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerStarted","Data":"18911f1721cc82b3998c54a57de00cb45bf3d3a97df6e5cb35e8260bed123713"} Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.768928 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.769350 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.826322 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.826890 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.967112 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.967261 4900 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 12:53:10 crc kubenswrapper[4900]: I0127 12:53:10.972371 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/glance-default-external-api-0" Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.021329 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerStarted","Data":"f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61"} Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.022354 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.022422 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.021541 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sq822" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="registry-server" containerID="cri-o://b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d" gracePeriod=2 Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.804161 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.909928 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-catalog-content\") pod \"157a4f73-69ff-48a5-a97b-e78693201660\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.910023 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-utilities\") pod \"157a4f73-69ff-48a5-a97b-e78693201660\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.910377 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2dcf\" (UniqueName: \"kubernetes.io/projected/157a4f73-69ff-48a5-a97b-e78693201660-kube-api-access-k2dcf\") pod \"157a4f73-69ff-48a5-a97b-e78693201660\" (UID: \"157a4f73-69ff-48a5-a97b-e78693201660\") " Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.915507 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-utilities" (OuterVolumeSpecName: "utilities") pod "157a4f73-69ff-48a5-a97b-e78693201660" (UID: "157a4f73-69ff-48a5-a97b-e78693201660"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.920733 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/157a4f73-69ff-48a5-a97b-e78693201660-kube-api-access-k2dcf" (OuterVolumeSpecName: "kube-api-access-k2dcf") pod "157a4f73-69ff-48a5-a97b-e78693201660" (UID: "157a4f73-69ff-48a5-a97b-e78693201660"). InnerVolumeSpecName "kube-api-access-k2dcf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.949875 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:11 crc kubenswrapper[4900]: I0127 12:53:11.974903 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "157a4f73-69ff-48a5-a97b-e78693201660" (UID: "157a4f73-69ff-48a5-a97b-e78693201660"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.015043 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.015128 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/157a4f73-69ff-48a5-a97b-e78693201660-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.015141 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2dcf\" (UniqueName: \"kubernetes.io/projected/157a4f73-69ff-48a5-a97b-e78693201660-kube-api-access-k2dcf\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.043589 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerStarted","Data":"59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9"} Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.051327 4900 generic.go:334] "Generic (PLEG): container finished" podID="157a4f73-69ff-48a5-a97b-e78693201660" containerID="b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d" exitCode=0 Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.052223 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sq822" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.052269 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sq822" event={"ID":"157a4f73-69ff-48a5-a97b-e78693201660","Type":"ContainerDied","Data":"b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d"} Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.052374 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sq822" event={"ID":"157a4f73-69ff-48a5-a97b-e78693201660","Type":"ContainerDied","Data":"aed5315d3fa888c3603a215e1c7b789a1bf6543b75f3645a8a6a113e6997591b"} Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.052397 4900 scope.go:117] "RemoveContainer" containerID="b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.082336 4900 scope.go:117] "RemoveContainer" containerID="b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.111570 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sq822"] Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.124806 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sq822"] Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.135862 4900 scope.go:117] "RemoveContainer" containerID="0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.191688 4900 scope.go:117] "RemoveContainer" containerID="b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d" Jan 27 12:53:12 crc kubenswrapper[4900]: E0127 12:53:12.192812 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d\": container with ID starting with b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d not found: ID does not exist" containerID="b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.192857 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d"} err="failed to get container status \"b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d\": rpc error: code = NotFound desc = could not find container \"b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d\": container with ID starting with b3716c6c8c1f2b793bebb386b6f912db9b149e4d368f43331206eda276090c9d not found: ID does not exist" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.192898 4900 scope.go:117] "RemoveContainer" containerID="b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5" Jan 27 12:53:12 crc kubenswrapper[4900]: E0127 12:53:12.198709 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5\": container with ID starting with b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5 not found: ID does not exist" containerID="b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.198836 4900 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5"} err="failed to get container status \"b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5\": rpc error: code = NotFound desc = could not find container \"b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5\": container with ID starting with b1796cdb447cf0f399fbe4ea02b2f019bba7188634e061e82a3b5dc4e09bdbc5 not found: ID does not exist" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.199004 4900 scope.go:117] "RemoveContainer" containerID="0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32" Jan 27 12:53:12 crc kubenswrapper[4900]: E0127 12:53:12.199967 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32\": container with ID starting with 0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32 not found: ID does not exist" containerID="0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.200024 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32"} err="failed to get container status \"0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32\": rpc error: code = NotFound desc = could not find container \"0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32\": container with ID starting with 0dc6a83ed35ec0a9ea55ee2fdc218c1aa9f85b453165941ad9c1938d886a5a32 not found: ID does not exist" Jan 27 12:53:12 crc kubenswrapper[4900]: I0127 12:53:12.499896 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="157a4f73-69ff-48a5-a97b-e78693201660" path="/var/lib/kubelet/pods/157a4f73-69ff-48a5-a97b-e78693201660/volumes" Jan 27 12:53:13 crc kubenswrapper[4900]: I0127 12:53:13.720710 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:13 crc kubenswrapper[4900]: I0127 12:53:13.720903 4900 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 27 12:53:13 crc kubenswrapper[4900]: I0127 12:53:13.824665 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 27 12:53:14 crc kubenswrapper[4900]: I0127 12:53:14.109887 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerStarted","Data":"bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40"} Jan 27 12:53:14 crc kubenswrapper[4900]: I0127 12:53:14.110182 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="ceilometer-central-agent" containerID="cri-o://2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e" gracePeriod=30 Jan 27 12:53:14 crc kubenswrapper[4900]: I0127 12:53:14.110216 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="proxy-httpd" containerID="cri-o://bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40" gracePeriod=30 Jan 27 12:53:14 crc 
kubenswrapper[4900]: I0127 12:53:14.110288 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="sg-core" containerID="cri-o://59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9" gracePeriod=30 Jan 27 12:53:14 crc kubenswrapper[4900]: I0127 12:53:14.110329 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="ceilometer-notification-agent" containerID="cri-o://f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61" gracePeriod=30 Jan 27 12:53:14 crc kubenswrapper[4900]: I0127 12:53:14.162863 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.345380332 podStartE2EDuration="6.162833757s" podCreationTimestamp="2026-01-27 12:53:08 +0000 UTC" firstStartedPulling="2026-01-27 12:53:09.12239383 +0000 UTC m=+1616.359422040" lastFinishedPulling="2026-01-27 12:53:12.939847255 +0000 UTC m=+1620.176875465" observedRunningTime="2026-01-27 12:53:14.133788114 +0000 UTC m=+1621.370816324" watchObservedRunningTime="2026-01-27 12:53:14.162833757 +0000 UTC m=+1621.399861967" Jan 27 12:53:14 crc kubenswrapper[4900]: E0127 12:53:14.465349 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6bafdbe_0e65_4611_8dc8_d31ab73c05c1.slice/crio-bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6bafdbe_0e65_4611_8dc8_d31ab73c05c1.slice/crio-conmon-59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9.scope\": RecentStats: unable to find data in memory cache]" Jan 27 12:53:15 crc kubenswrapper[4900]: I0127 12:53:15.126520 4900 generic.go:334] "Generic (PLEG): container finished" podID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerID="bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40" exitCode=0 Jan 27 12:53:15 crc kubenswrapper[4900]: I0127 12:53:15.126563 4900 generic.go:334] "Generic (PLEG): container finished" podID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerID="59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9" exitCode=2 Jan 27 12:53:15 crc kubenswrapper[4900]: I0127 12:53:15.126572 4900 generic.go:334] "Generic (PLEG): container finished" podID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerID="f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61" exitCode=0 Jan 27 12:53:15 crc kubenswrapper[4900]: I0127 12:53:15.126603 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerDied","Data":"bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40"} Jan 27 12:53:15 crc kubenswrapper[4900]: I0127 12:53:15.126657 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerDied","Data":"59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9"} Jan 27 12:53:15 crc kubenswrapper[4900]: I0127 12:53:15.126668 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
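
The pod_startup_latency_tracker line above is self-consistent arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). The field semantics are inferred from the values in this very line; the sketch below reproduces both durations from them:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	// Timestamps copied from the ceilometer-0 startup-latency log line.
	created := parse("2026-01-27 12:53:08 +0000 UTC")
	firstPull := parse("2026-01-27 12:53:09.12239383 +0000 UTC")
	lastPull := parse("2026-01-27 12:53:12.939847255 +0000 UTC")
	observed := parse("2026-01-27 12:53:14.162833757 +0000 UTC")

	e2e := observed.Sub(created)          // wall-clock time from creation to observed running
	slo := e2e - lastPull.Sub(firstPull)  // the SLO figure excludes image-pull time
	fmt.Println("podStartE2EDuration:", e2e) // 6.162833757s, matching the log
	fmt.Println("podStartSLOduration:", slo) // 2.345380332s, matching the log
}
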
event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerDied","Data":"f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61"} Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.677981 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-qlwsf"] Jan 27 12:53:19 crc kubenswrapper[4900]: E0127 12:53:19.679132 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="extract-utilities" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.679155 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="extract-utilities" Jan 27 12:53:19 crc kubenswrapper[4900]: E0127 12:53:19.679191 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="registry-server" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.679199 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="registry-server" Jan 27 12:53:19 crc kubenswrapper[4900]: E0127 12:53:19.679235 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="extract-content" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.679243 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="extract-content" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.679558 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="157a4f73-69ff-48a5-a97b-e78693201660" containerName="registry-server" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.680766 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.702716 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-qlwsf"] Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.756105 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca4e1441-8211-48f4-9c31-b0c519574afa-operator-scripts\") pod \"nova-api-db-create-qlwsf\" (UID: \"ca4e1441-8211-48f4-9c31-b0c519574afa\") " pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.756349 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqk98\" (UniqueName: \"kubernetes.io/projected/ca4e1441-8211-48f4-9c31-b0c519574afa-kube-api-access-sqk98\") pod \"nova-api-db-create-qlwsf\" (UID: \"ca4e1441-8211-48f4-9c31-b0c519574afa\") " pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.789122 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-l4tgz"] Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.791185 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.824468 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-l4tgz"] Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.859246 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bcsv\" (UniqueName: \"kubernetes.io/projected/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-kube-api-access-2bcsv\") pod \"nova-cell0-db-create-l4tgz\" (UID: \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\") " pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.859325 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca4e1441-8211-48f4-9c31-b0c519574afa-operator-scripts\") pod \"nova-api-db-create-qlwsf\" (UID: \"ca4e1441-8211-48f4-9c31-b0c519574afa\") " pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.859417 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-operator-scripts\") pod \"nova-cell0-db-create-l4tgz\" (UID: \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\") " pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.859493 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqk98\" (UniqueName: \"kubernetes.io/projected/ca4e1441-8211-48f4-9c31-b0c519574afa-kube-api-access-sqk98\") pod \"nova-api-db-create-qlwsf\" (UID: \"ca4e1441-8211-48f4-9c31-b0c519574afa\") " pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.860915 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca4e1441-8211-48f4-9c31-b0c519574afa-operator-scripts\") pod \"nova-api-db-create-qlwsf\" (UID: \"ca4e1441-8211-48f4-9c31-b0c519574afa\") " pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.898280 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqk98\" (UniqueName: \"kubernetes.io/projected/ca4e1441-8211-48f4-9c31-b0c519574afa-kube-api-access-sqk98\") pod \"nova-api-db-create-qlwsf\" (UID: \"ca4e1441-8211-48f4-9c31-b0c519574afa\") " pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.902200 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-kpqlh"] Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.904599 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.930776 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-kpqlh"] Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.970506 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4b3937-5f67-437e-a73e-45ef279cd10b-operator-scripts\") pod \"nova-cell1-db-create-kpqlh\" (UID: \"db4b3937-5f67-437e-a73e-45ef279cd10b\") " pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.970669 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bcsv\" (UniqueName: \"kubernetes.io/projected/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-kube-api-access-2bcsv\") pod \"nova-cell0-db-create-l4tgz\" (UID: \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\") " pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.970746 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-operator-scripts\") pod \"nova-cell0-db-create-l4tgz\" (UID: \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\") " pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.970788 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jdt9\" (UniqueName: \"kubernetes.io/projected/db4b3937-5f67-437e-a73e-45ef279cd10b-kube-api-access-8jdt9\") pod \"nova-cell1-db-create-kpqlh\" (UID: \"db4b3937-5f67-437e-a73e-45ef279cd10b\") " pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:19 crc kubenswrapper[4900]: I0127 12:53:19.971811 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-operator-scripts\") pod \"nova-cell0-db-create-l4tgz\" (UID: \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\") " pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.008636 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bcsv\" (UniqueName: \"kubernetes.io/projected/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-kube-api-access-2bcsv\") pod \"nova-cell0-db-create-l4tgz\" (UID: \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\") " pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.015720 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.083271 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jdt9\" (UniqueName: \"kubernetes.io/projected/db4b3937-5f67-437e-a73e-45ef279cd10b-kube-api-access-8jdt9\") pod \"nova-cell1-db-create-kpqlh\" (UID: \"db4b3937-5f67-437e-a73e-45ef279cd10b\") " pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.083642 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4b3937-5f67-437e-a73e-45ef279cd10b-operator-scripts\") pod \"nova-cell1-db-create-kpqlh\" (UID: \"db4b3937-5f67-437e-a73e-45ef279cd10b\") " pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.084990 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4b3937-5f67-437e-a73e-45ef279cd10b-operator-scripts\") pod \"nova-cell1-db-create-kpqlh\" (UID: \"db4b3937-5f67-437e-a73e-45ef279cd10b\") " pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.092119 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-a1f3-account-create-update-7rzqb"] Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.111944 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.126281 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.153121 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jdt9\" (UniqueName: \"kubernetes.io/projected/db4b3937-5f67-437e-a73e-45ef279cd10b-kube-api-access-8jdt9\") pod \"nova-cell1-db-create-kpqlh\" (UID: \"db4b3937-5f67-437e-a73e-45ef279cd10b\") " pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.167511 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-a1f3-account-create-update-7rzqb"] Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.169120 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.220448 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bdcd763e-2447-4a8f-a234-223e5c09708d-operator-scripts\") pod \"nova-api-a1f3-account-create-update-7rzqb\" (UID: \"bdcd763e-2447-4a8f-a234-223e5c09708d\") " pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.220827 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jvx6\" (UniqueName: \"kubernetes.io/projected/bdcd763e-2447-4a8f-a234-223e5c09708d-kube-api-access-9jvx6\") pod \"nova-api-a1f3-account-create-update-7rzqb\" (UID: \"bdcd763e-2447-4a8f-a234-223e5c09708d\") " pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.323862 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bdcd763e-2447-4a8f-a234-223e5c09708d-operator-scripts\") pod \"nova-api-a1f3-account-create-update-7rzqb\" (UID: \"bdcd763e-2447-4a8f-a234-223e5c09708d\") " pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.324038 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jvx6\" (UniqueName: \"kubernetes.io/projected/bdcd763e-2447-4a8f-a234-223e5c09708d-kube-api-access-9jvx6\") pod \"nova-api-a1f3-account-create-update-7rzqb\" (UID: \"bdcd763e-2447-4a8f-a234-223e5c09708d\") " pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.325856 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bdcd763e-2447-4a8f-a234-223e5c09708d-operator-scripts\") pod \"nova-api-a1f3-account-create-update-7rzqb\" (UID: \"bdcd763e-2447-4a8f-a234-223e5c09708d\") " pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.327872 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-9113-account-create-update-gtf6n"] Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.330032 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.336997 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.339143 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.363331 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jvx6\" (UniqueName: \"kubernetes.io/projected/bdcd763e-2447-4a8f-a234-223e5c09708d-kube-api-access-9jvx6\") pod \"nova-api-a1f3-account-create-update-7rzqb\" (UID: \"bdcd763e-2447-4a8f-a234-223e5c09708d\") " pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.369344 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-9113-account-create-update-gtf6n"] Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.426961 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2mdt\" (UniqueName: \"kubernetes.io/projected/9c80a52f-5539-4e14-9953-63525b30928d-kube-api-access-l2mdt\") pod \"nova-cell0-9113-account-create-update-gtf6n\" (UID: \"9c80a52f-5539-4e14-9953-63525b30928d\") " pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.427531 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c80a52f-5539-4e14-9953-63525b30928d-operator-scripts\") pod \"nova-cell0-9113-account-create-update-gtf6n\" (UID: \"9c80a52f-5539-4e14-9953-63525b30928d\") " pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.433144 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-0076-account-create-update-jz6pf"] Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.434856 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.440528 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.462855 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0076-account-create-update-jz6pf"] Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.534818 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c80a52f-5539-4e14-9953-63525b30928d-operator-scripts\") pod \"nova-cell0-9113-account-create-update-gtf6n\" (UID: \"9c80a52f-5539-4e14-9953-63525b30928d\") " pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.534941 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2mdt\" (UniqueName: \"kubernetes.io/projected/9c80a52f-5539-4e14-9953-63525b30928d-kube-api-access-l2mdt\") pod \"nova-cell0-9113-account-create-update-gtf6n\" (UID: \"9c80a52f-5539-4e14-9953-63525b30928d\") " pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.535686 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/275a0058-147e-43c0-b109-5d036591eb61-operator-scripts\") pod \"nova-cell1-0076-account-create-update-jz6pf\" (UID: \"275a0058-147e-43c0-b109-5d036591eb61\") " pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.535789 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95xlf\" (UniqueName: \"kubernetes.io/projected/275a0058-147e-43c0-b109-5d036591eb61-kube-api-access-95xlf\") pod \"nova-cell1-0076-account-create-update-jz6pf\" (UID: \"275a0058-147e-43c0-b109-5d036591eb61\") " pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.536089 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c80a52f-5539-4e14-9953-63525b30928d-operator-scripts\") pod \"nova-cell0-9113-account-create-update-gtf6n\" (UID: \"9c80a52f-5539-4e14-9953-63525b30928d\") " pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.564709 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2mdt\" (UniqueName: \"kubernetes.io/projected/9c80a52f-5539-4e14-9953-63525b30928d-kube-api-access-l2mdt\") pod \"nova-cell0-9113-account-create-update-gtf6n\" (UID: \"9c80a52f-5539-4e14-9953-63525b30928d\") " pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.638846 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/275a0058-147e-43c0-b109-5d036591eb61-operator-scripts\") pod \"nova-cell1-0076-account-create-update-jz6pf\" (UID: \"275a0058-147e-43c0-b109-5d036591eb61\") " pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.639265 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-95xlf\" (UniqueName: \"kubernetes.io/projected/275a0058-147e-43c0-b109-5d036591eb61-kube-api-access-95xlf\") pod \"nova-cell1-0076-account-create-update-jz6pf\" (UID: \"275a0058-147e-43c0-b109-5d036591eb61\") " pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.642014 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/275a0058-147e-43c0-b109-5d036591eb61-operator-scripts\") pod \"nova-cell1-0076-account-create-update-jz6pf\" (UID: \"275a0058-147e-43c0-b109-5d036591eb61\") " pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.660584 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.661239 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95xlf\" (UniqueName: \"kubernetes.io/projected/275a0058-147e-43c0-b109-5d036591eb61-kube-api-access-95xlf\") pod \"nova-cell1-0076-account-create-update-jz6pf\" (UID: \"275a0058-147e-43c0-b109-5d036591eb61\") " pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.681130 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.788456 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:20 crc kubenswrapper[4900]: I0127 12:53:20.878616 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-qlwsf"] Jan 27 12:53:20 crc kubenswrapper[4900]: W0127 12:53:20.919236 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca4e1441_8211_48f4_9c31_b0c519574afa.slice/crio-0935b9513e627e13a861e4dd87ae978134ed0fb2cd5d64865fe9bf9a83fdb1ac WatchSource:0}: Error finding container 0935b9513e627e13a861e4dd87ae978134ed0fb2cd5d64865fe9bf9a83fdb1ac: Status 404 returned error can't find the container with id 0935b9513e627e13a861e4dd87ae978134ed0fb2cd5d64865fe9bf9a83fdb1ac Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.087308 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-l4tgz"] Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.113637 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-kpqlh"] Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.370108 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-kpqlh" event={"ID":"db4b3937-5f67-437e-a73e-45ef279cd10b","Type":"ContainerStarted","Data":"a5de7d3bd10efc28939d8c5d6ba162bb3e3db7ac80fabdb9589fe72178297606"} Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.372541 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-qlwsf" event={"ID":"ca4e1441-8211-48f4-9c31-b0c519574afa","Type":"ContainerStarted","Data":"0935b9513e627e13a861e4dd87ae978134ed0fb2cd5d64865fe9bf9a83fdb1ac"} Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.374741 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-db-create-l4tgz" event={"ID":"c1f4bb23-786a-41d3-9b04-9e2cfba9d467","Type":"ContainerStarted","Data":"82220dca35523f0b22b8ae384711756c8f59a848c0251bebb862faf961cc20e9"} Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.441924 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-a1f3-account-create-update-7rzqb"] Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.466022 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-9113-account-create-update-gtf6n"] Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.488225 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:53:21 crc kubenswrapper[4900]: E0127 12:53:21.488566 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:53:21 crc kubenswrapper[4900]: I0127 12:53:21.612569 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-0076-account-create-update-jz6pf"] Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.390993 4900 generic.go:334] "Generic (PLEG): container finished" podID="db4b3937-5f67-437e-a73e-45ef279cd10b" containerID="d3b64d582fdd7fb41976581774db0587dea47803fe8bfa1cbba673c4c4bc838c" exitCode=0 Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.392071 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-kpqlh" event={"ID":"db4b3937-5f67-437e-a73e-45ef279cd10b","Type":"ContainerDied","Data":"d3b64d582fdd7fb41976581774db0587dea47803fe8bfa1cbba673c4c4bc838c"} Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.402032 4900 generic.go:334] "Generic (PLEG): container finished" podID="ca4e1441-8211-48f4-9c31-b0c519574afa" containerID="07b28eaa38a70f43bd7c7d41b01065088402b9fc76c4d44b801511519d0f9031" exitCode=0 Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.402110 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-qlwsf" event={"ID":"ca4e1441-8211-48f4-9c31-b0c519574afa","Type":"ContainerDied","Data":"07b28eaa38a70f43bd7c7d41b01065088402b9fc76c4d44b801511519d0f9031"} Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.408643 4900 generic.go:334] "Generic (PLEG): container finished" podID="c1f4bb23-786a-41d3-9b04-9e2cfba9d467" containerID="651b848533f1c2f5ba8404b692194682878ead8131eaed347c67a4309726163a" exitCode=0 Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.408698 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-l4tgz" event={"ID":"c1f4bb23-786a-41d3-9b04-9e2cfba9d467","Type":"ContainerDied","Data":"651b848533f1c2f5ba8404b692194682878ead8131eaed347c67a4309726163a"} Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.418840 4900 generic.go:334] "Generic (PLEG): container finished" podID="9c80a52f-5539-4e14-9953-63525b30928d" containerID="9afc15016566d8161257b2a05b5224b48bb4374132701783236bc7ec289f9ba0" exitCode=0 Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.419008 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-9113-account-create-update-gtf6n" 
event={"ID":"9c80a52f-5539-4e14-9953-63525b30928d","Type":"ContainerDied","Data":"9afc15016566d8161257b2a05b5224b48bb4374132701783236bc7ec289f9ba0"} Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.419074 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-9113-account-create-update-gtf6n" event={"ID":"9c80a52f-5539-4e14-9953-63525b30928d","Type":"ContainerStarted","Data":"f3554e966c02eaa7021cbb8b31cd61411012b11e2298ae4986d93edfe7ff3d2a"} Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.424882 4900 generic.go:334] "Generic (PLEG): container finished" podID="bdcd763e-2447-4a8f-a234-223e5c09708d" containerID="2145cd4eb32f67a2363145b3bb8236bbfe717d049018e3d42f90de5619b713b2" exitCode=0 Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.425157 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a1f3-account-create-update-7rzqb" event={"ID":"bdcd763e-2447-4a8f-a234-223e5c09708d","Type":"ContainerDied","Data":"2145cd4eb32f67a2363145b3bb8236bbfe717d049018e3d42f90de5619b713b2"} Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.425196 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a1f3-account-create-update-7rzqb" event={"ID":"bdcd763e-2447-4a8f-a234-223e5c09708d","Type":"ContainerStarted","Data":"21304fdcc71e872ca4fee64f34e5ae42548841ccfe3db540068006f2708efe3a"} Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.428654 4900 generic.go:334] "Generic (PLEG): container finished" podID="275a0058-147e-43c0-b109-5d036591eb61" containerID="64c8085e10dd82ad7e63f992b6e0dbd1401d00a1203c97c4b3e60850e098f82d" exitCode=0 Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.428735 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0076-account-create-update-jz6pf" event={"ID":"275a0058-147e-43c0-b109-5d036591eb61","Type":"ContainerDied","Data":"64c8085e10dd82ad7e63f992b6e0dbd1401d00a1203c97c4b3e60850e098f82d"} Jan 27 12:53:22 crc kubenswrapper[4900]: I0127 12:53:22.428783 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0076-account-create-update-jz6pf" event={"ID":"275a0058-147e-43c0-b109-5d036591eb61","Type":"ContainerStarted","Data":"b2c0d3cb0743f055615482809cfe0c00a994ef744ba2ec061a62ad4cd84704bf"} Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.053233 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190101 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-combined-ca-bundle\") pod \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190169 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-config-data\") pod \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190260 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-log-httpd\") pod \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190388 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-run-httpd\") pod \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190420 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-scripts\") pod \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190471 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbgd6\" (UniqueName: \"kubernetes.io/projected/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-kube-api-access-kbgd6\") pod \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190520 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-sg-core-conf-yaml\") pod \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\" (UID: \"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1\") " Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190849 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" (UID: "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.190980 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" (UID: "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.191411 4900 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.191427 4900 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.195945 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-scripts" (OuterVolumeSpecName: "scripts") pod "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" (UID: "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.210626 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-kube-api-access-kbgd6" (OuterVolumeSpecName: "kube-api-access-kbgd6") pod "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" (UID: "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1"). InnerVolumeSpecName "kube-api-access-kbgd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.228382 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" (UID: "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.293755 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.293794 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbgd6\" (UniqueName: \"kubernetes.io/projected/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-kube-api-access-kbgd6\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.293811 4900 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.304021 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" (UID: "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.336472 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-config-data" (OuterVolumeSpecName: "config-data") pod "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" (UID: "b6bafdbe-0e65-4611-8dc8-d31ab73c05c1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.396344 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.396384 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.441187 4900 generic.go:334] "Generic (PLEG): container finished" podID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerID="2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e" exitCode=0 Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.441296 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.441340 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerDied","Data":"2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e"} Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.441384 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bafdbe-0e65-4611-8dc8-d31ab73c05c1","Type":"ContainerDied","Data":"18911f1721cc82b3998c54a57de00cb45bf3d3a97df6e5cb35e8260bed123713"} Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.441410 4900 scope.go:117] "RemoveContainer" containerID="bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.507577 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.523412 4900 scope.go:117] "RemoveContainer" containerID="59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.535943 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.555722 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:23 crc kubenswrapper[4900]: E0127 12:53:23.556843 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="ceilometer-central-agent" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.556870 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="ceilometer-central-agent" Jan 27 12:53:23 crc kubenswrapper[4900]: E0127 12:53:23.556889 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="ceilometer-notification-agent" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.556898 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="ceilometer-notification-agent" Jan 27 12:53:23 crc kubenswrapper[4900]: E0127 12:53:23.556915 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="proxy-httpd" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.556923 4900 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="proxy-httpd" Jan 27 12:53:23 crc kubenswrapper[4900]: E0127 12:53:23.556946 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="sg-core" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.556953 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="sg-core" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.557307 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="proxy-httpd" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.557337 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="sg-core" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.557364 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="ceilometer-central-agent" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.557376 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" containerName="ceilometer-notification-agent" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.563143 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.569970 4900 scope.go:117] "RemoveContainer" containerID="f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.570635 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.570645 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.582848 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.625707 4900 scope.go:117] "RemoveContainer" containerID="2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.675309 4900 scope.go:117] "RemoveContainer" containerID="bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40" Jan 27 12:53:23 crc kubenswrapper[4900]: E0127 12:53:23.676458 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40\": container with ID starting with bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40 not found: ID does not exist" containerID="bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.676609 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40"} err="failed to get container status \"bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40\": rpc error: code = NotFound desc = could not find container \"bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40\": container with ID starting with bb869d1f369a393f173636e2f229b27319dcd0e34b63bd810836122515a9ef40 not found: ID does not exist" Jan 27 12:53:23 crc 
kubenswrapper[4900]: I0127 12:53:23.676645 4900 scope.go:117] "RemoveContainer" containerID="59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9" Jan 27 12:53:23 crc kubenswrapper[4900]: E0127 12:53:23.680529 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9\": container with ID starting with 59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9 not found: ID does not exist" containerID="59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.680564 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9"} err="failed to get container status \"59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9\": rpc error: code = NotFound desc = could not find container \"59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9\": container with ID starting with 59f95832fcc0d1d9491fb7d8608c8359b362ecbd8bf985d93b540a9b00a44ca9 not found: ID does not exist" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.680583 4900 scope.go:117] "RemoveContainer" containerID="f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61" Jan 27 12:53:23 crc kubenswrapper[4900]: E0127 12:53:23.681345 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61\": container with ID starting with f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61 not found: ID does not exist" containerID="f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.681385 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61"} err="failed to get container status \"f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61\": rpc error: code = NotFound desc = could not find container \"f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61\": container with ID starting with f61e14443bbd44869885bf035feb05750c6c4a19c22e4098cb1f5ef1df579c61 not found: ID does not exist" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.681403 4900 scope.go:117] "RemoveContainer" containerID="2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e" Jan 27 12:53:23 crc kubenswrapper[4900]: E0127 12:53:23.681759 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e\": container with ID starting with 2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e not found: ID does not exist" containerID="2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.681815 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e"} err="failed to get container status \"2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e\": rpc error: code = NotFound desc = could not find container 
\"2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e\": container with ID starting with 2fe03a70e3cbeb62b6131c68e7195221ed27ffd96b59954678e1508a37f97e2e not found: ID does not exist" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.708803 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpmzc\" (UniqueName: \"kubernetes.io/projected/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-kube-api-access-tpmzc\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.708955 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-config-data\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.708988 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-run-httpd\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.709098 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-scripts\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.709176 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.709204 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-log-httpd\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.709300 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.811332 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-run-httpd\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.811449 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-scripts\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.811513 
4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.811541 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-log-httpd\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.811640 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.811671 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpmzc\" (UniqueName: \"kubernetes.io/projected/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-kube-api-access-tpmzc\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.811752 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-config-data\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.813366 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-log-httpd\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.814016 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-run-httpd\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.818811 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.821282 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-scripts\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.822449 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-config-data\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.833816 4900 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.837111 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpmzc\" (UniqueName: \"kubernetes.io/projected/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-kube-api-access-tpmzc\") pod \"ceilometer-0\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.892112 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:23 crc kubenswrapper[4900]: I0127 12:53:23.897784 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.015406 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jvx6\" (UniqueName: \"kubernetes.io/projected/bdcd763e-2447-4a8f-a234-223e5c09708d-kube-api-access-9jvx6\") pod \"bdcd763e-2447-4a8f-a234-223e5c09708d\" (UID: \"bdcd763e-2447-4a8f-a234-223e5c09708d\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.015913 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bdcd763e-2447-4a8f-a234-223e5c09708d-operator-scripts\") pod \"bdcd763e-2447-4a8f-a234-223e5c09708d\" (UID: \"bdcd763e-2447-4a8f-a234-223e5c09708d\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.019092 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdcd763e-2447-4a8f-a234-223e5c09708d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bdcd763e-2447-4a8f-a234-223e5c09708d" (UID: "bdcd763e-2447-4a8f-a234-223e5c09708d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.023551 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdcd763e-2447-4a8f-a234-223e5c09708d-kube-api-access-9jvx6" (OuterVolumeSpecName: "kube-api-access-9jvx6") pod "bdcd763e-2447-4a8f-a234-223e5c09708d" (UID: "bdcd763e-2447-4a8f-a234-223e5c09708d"). InnerVolumeSpecName "kube-api-access-9jvx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.121217 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jvx6\" (UniqueName: \"kubernetes.io/projected/bdcd763e-2447-4a8f-a234-223e5c09708d-kube-api-access-9jvx6\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.121446 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bdcd763e-2447-4a8f-a234-223e5c09708d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.437429 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.451228 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.472194 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.472337 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-kpqlh" event={"ID":"db4b3937-5f67-437e-a73e-45ef279cd10b","Type":"ContainerDied","Data":"a5de7d3bd10efc28939d8c5d6ba162bb3e3db7ac80fabdb9589fe72178297606"} Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.472386 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5de7d3bd10efc28939d8c5d6ba162bb3e3db7ac80fabdb9589fe72178297606" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.472383 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-kpqlh" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.475230 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-qlwsf" event={"ID":"ca4e1441-8211-48f4-9c31-b0c519574afa","Type":"ContainerDied","Data":"0935b9513e627e13a861e4dd87ae978134ed0fb2cd5d64865fe9bf9a83fdb1ac"} Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.475263 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0935b9513e627e13a861e4dd87ae978134ed0fb2cd5d64865fe9bf9a83fdb1ac" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.477947 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-l4tgz" event={"ID":"c1f4bb23-786a-41d3-9b04-9e2cfba9d467","Type":"ContainerDied","Data":"82220dca35523f0b22b8ae384711756c8f59a848c0251bebb862faf961cc20e9"} Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.477976 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82220dca35523f0b22b8ae384711756c8f59a848c0251bebb862faf961cc20e9" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.478043 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-l4tgz" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.503512 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.506366 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a1f3-account-create-update-7rzqb" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.519789 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6bafdbe-0e65-4611-8dc8-d31ab73c05c1" path="/var/lib/kubelet/pods/b6bafdbe-0e65-4611-8dc8-d31ab73c05c1/volumes" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.523793 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-0076-account-create-update-jz6pf" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.524734 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.578065 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-9113-account-create-update-gtf6n" event={"ID":"9c80a52f-5539-4e14-9953-63525b30928d","Type":"ContainerDied","Data":"f3554e966c02eaa7021cbb8b31cd61411012b11e2298ae4986d93edfe7ff3d2a"} Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.578112 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3554e966c02eaa7021cbb8b31cd61411012b11e2298ae4986d93edfe7ff3d2a" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.578141 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a1f3-account-create-update-7rzqb" event={"ID":"bdcd763e-2447-4a8f-a234-223e5c09708d","Type":"ContainerDied","Data":"21304fdcc71e872ca4fee64f34e5ae42548841ccfe3db540068006f2708efe3a"} Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.578152 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21304fdcc71e872ca4fee64f34e5ae42548841ccfe3db540068006f2708efe3a" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.578160 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-0076-account-create-update-jz6pf" event={"ID":"275a0058-147e-43c0-b109-5d036591eb61","Type":"ContainerDied","Data":"b2c0d3cb0743f055615482809cfe0c00a994ef744ba2ec061a62ad4cd84704bf"} Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.578172 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2c0d3cb0743f055615482809cfe0c00a994ef744ba2ec061a62ad4cd84704bf" Jan 27 12:53:24 crc kubenswrapper[4900]: W0127 12:53:24.663119 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74a4dd06_b3a3_43dd_bd7e_a87c35a5bc1f.slice/crio-62026b805d95fa2dfc4213109dc5a1c9827b07637b2c20a3bf78c00fb2424e4b WatchSource:0}: Error finding container 62026b805d95fa2dfc4213109dc5a1c9827b07637b2c20a3bf78c00fb2424e4b: Status 404 returned error can't find the container with id 62026b805d95fa2dfc4213109dc5a1c9827b07637b2c20a3bf78c00fb2424e4b Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.665070 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.676973 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4b3937-5f67-437e-a73e-45ef279cd10b-operator-scripts\") pod \"db4b3937-5f67-437e-a73e-45ef279cd10b\" (UID: \"db4b3937-5f67-437e-a73e-45ef279cd10b\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677092 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-operator-scripts\") pod \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\" (UID: \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677207 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/275a0058-147e-43c0-b109-5d036591eb61-operator-scripts\") pod \"275a0058-147e-43c0-b109-5d036591eb61\" (UID: \"275a0058-147e-43c0-b109-5d036591eb61\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677255 4900 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95xlf\" (UniqueName: \"kubernetes.io/projected/275a0058-147e-43c0-b109-5d036591eb61-kube-api-access-95xlf\") pod \"275a0058-147e-43c0-b109-5d036591eb61\" (UID: \"275a0058-147e-43c0-b109-5d036591eb61\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677344 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c80a52f-5539-4e14-9953-63525b30928d-operator-scripts\") pod \"9c80a52f-5539-4e14-9953-63525b30928d\" (UID: \"9c80a52f-5539-4e14-9953-63525b30928d\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677395 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jdt9\" (UniqueName: \"kubernetes.io/projected/db4b3937-5f67-437e-a73e-45ef279cd10b-kube-api-access-8jdt9\") pod \"db4b3937-5f67-437e-a73e-45ef279cd10b\" (UID: \"db4b3937-5f67-437e-a73e-45ef279cd10b\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677447 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2mdt\" (UniqueName: \"kubernetes.io/projected/9c80a52f-5539-4e14-9953-63525b30928d-kube-api-access-l2mdt\") pod \"9c80a52f-5539-4e14-9953-63525b30928d\" (UID: \"9c80a52f-5539-4e14-9953-63525b30928d\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677603 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bcsv\" (UniqueName: \"kubernetes.io/projected/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-kube-api-access-2bcsv\") pod \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\" (UID: \"c1f4bb23-786a-41d3-9b04-9e2cfba9d467\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677740 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca4e1441-8211-48f4-9c31-b0c519574afa-operator-scripts\") pod \"ca4e1441-8211-48f4-9c31-b0c519574afa\" (UID: \"ca4e1441-8211-48f4-9c31-b0c519574afa\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.677784 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqk98\" (UniqueName: \"kubernetes.io/projected/ca4e1441-8211-48f4-9c31-b0c519574afa-kube-api-access-sqk98\") pod \"ca4e1441-8211-48f4-9c31-b0c519574afa\" (UID: \"ca4e1441-8211-48f4-9c31-b0c519574afa\") " Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.678922 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c1f4bb23-786a-41d3-9b04-9e2cfba9d467" (UID: "c1f4bb23-786a-41d3-9b04-9e2cfba9d467"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.679444 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4b3937-5f67-437e-a73e-45ef279cd10b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "db4b3937-5f67-437e-a73e-45ef279cd10b" (UID: "db4b3937-5f67-437e-a73e-45ef279cd10b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.680031 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca4e1441-8211-48f4-9c31-b0c519574afa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ca4e1441-8211-48f4-9c31-b0c519574afa" (UID: "ca4e1441-8211-48f4-9c31-b0c519574afa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.680218 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/275a0058-147e-43c0-b109-5d036591eb61-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "275a0058-147e-43c0-b109-5d036591eb61" (UID: "275a0058-147e-43c0-b109-5d036591eb61"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.680370 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c80a52f-5539-4e14-9953-63525b30928d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9c80a52f-5539-4e14-9953-63525b30928d" (UID: "9c80a52f-5539-4e14-9953-63525b30928d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.686649 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db4b3937-5f67-437e-a73e-45ef279cd10b-kube-api-access-8jdt9" (OuterVolumeSpecName: "kube-api-access-8jdt9") pod "db4b3937-5f67-437e-a73e-45ef279cd10b" (UID: "db4b3937-5f67-437e-a73e-45ef279cd10b"). InnerVolumeSpecName "kube-api-access-8jdt9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.686829 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c80a52f-5539-4e14-9953-63525b30928d-kube-api-access-l2mdt" (OuterVolumeSpecName: "kube-api-access-l2mdt") pod "9c80a52f-5539-4e14-9953-63525b30928d" (UID: "9c80a52f-5539-4e14-9953-63525b30928d"). InnerVolumeSpecName "kube-api-access-l2mdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.686951 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/275a0058-147e-43c0-b109-5d036591eb61-kube-api-access-95xlf" (OuterVolumeSpecName: "kube-api-access-95xlf") pod "275a0058-147e-43c0-b109-5d036591eb61" (UID: "275a0058-147e-43c0-b109-5d036591eb61"). InnerVolumeSpecName "kube-api-access-95xlf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.691497 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca4e1441-8211-48f4-9c31-b0c519574afa-kube-api-access-sqk98" (OuterVolumeSpecName: "kube-api-access-sqk98") pod "ca4e1441-8211-48f4-9c31-b0c519574afa" (UID: "ca4e1441-8211-48f4-9c31-b0c519574afa"). InnerVolumeSpecName "kube-api-access-sqk98". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.691552 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-kube-api-access-2bcsv" (OuterVolumeSpecName: "kube-api-access-2bcsv") pod "c1f4bb23-786a-41d3-9b04-9e2cfba9d467" (UID: "c1f4bb23-786a-41d3-9b04-9e2cfba9d467"). InnerVolumeSpecName "kube-api-access-2bcsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780396 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95xlf\" (UniqueName: \"kubernetes.io/projected/275a0058-147e-43c0-b109-5d036591eb61-kube-api-access-95xlf\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780449 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c80a52f-5539-4e14-9953-63525b30928d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780464 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jdt9\" (UniqueName: \"kubernetes.io/projected/db4b3937-5f67-437e-a73e-45ef279cd10b-kube-api-access-8jdt9\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780476 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2mdt\" (UniqueName: \"kubernetes.io/projected/9c80a52f-5539-4e14-9953-63525b30928d-kube-api-access-l2mdt\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780489 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bcsv\" (UniqueName: \"kubernetes.io/projected/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-kube-api-access-2bcsv\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780502 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca4e1441-8211-48f4-9c31-b0c519574afa-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780514 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqk98\" (UniqueName: \"kubernetes.io/projected/ca4e1441-8211-48f4-9c31-b0c519574afa-kube-api-access-sqk98\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780526 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db4b3937-5f67-437e-a73e-45ef279cd10b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780673 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1f4bb23-786a-41d3-9b04-9e2cfba9d467-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: I0127 12:53:24.780691 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/275a0058-147e-43c0-b109-5d036591eb61-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:24 crc kubenswrapper[4900]: E0127 12:53:24.884445 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbdcd763e_2447_4a8f_a234_223e5c09708d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbdcd763e_2447_4a8f_a234_223e5c09708d.slice/crio-21304fdcc71e872ca4fee64f34e5ae42548841ccfe3db540068006f2708efe3a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1f4bb23_786a_41d3_9b04_9e2cfba9d467.slice\": RecentStats: unable to find data in memory cache]" Jan 27 12:53:25 crc kubenswrapper[4900]: I0127 12:53:25.544312 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerStarted","Data":"ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c"} Jan 27 12:53:25 crc kubenswrapper[4900]: I0127 12:53:25.544842 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerStarted","Data":"62026b805d95fa2dfc4213109dc5a1c9827b07637b2c20a3bf78c00fb2424e4b"} Jan 27 12:53:25 crc kubenswrapper[4900]: I0127 12:53:25.544449 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-9113-account-create-update-gtf6n" Jan 27 12:53:25 crc kubenswrapper[4900]: I0127 12:53:25.544413 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-qlwsf" Jan 27 12:53:26 crc kubenswrapper[4900]: I0127 12:53:26.560444 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerStarted","Data":"3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1"} Jan 27 12:53:27 crc kubenswrapper[4900]: I0127 12:53:27.596156 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerStarted","Data":"32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e"} Jan 27 12:53:28 crc kubenswrapper[4900]: I0127 12:53:28.610486 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerStarted","Data":"026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc"} Jan 27 12:53:28 crc kubenswrapper[4900]: I0127 12:53:28.611317 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 12:53:28 crc kubenswrapper[4900]: I0127 12:53:28.634586 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.23167461 podStartE2EDuration="5.634557356s" podCreationTimestamp="2026-01-27 12:53:23 +0000 UTC" firstStartedPulling="2026-01-27 12:53:24.670711675 +0000 UTC m=+1631.907739885" lastFinishedPulling="2026-01-27 12:53:28.073594411 +0000 UTC m=+1635.310622631" observedRunningTime="2026-01-27 12:53:28.631415165 +0000 UTC m=+1635.868443375" watchObservedRunningTime="2026-01-27 12:53:28.634557356 +0000 UTC m=+1635.871585566" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.736925 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-hj679"] Jan 27 12:53:30 crc kubenswrapper[4900]: E0127 12:53:30.738864 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="bdcd763e-2447-4a8f-a234-223e5c09708d" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.738895 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdcd763e-2447-4a8f-a234-223e5c09708d" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: E0127 12:53:30.748951 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db4b3937-5f67-437e-a73e-45ef279cd10b" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.749031 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="db4b3937-5f67-437e-a73e-45ef279cd10b" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: E0127 12:53:30.749139 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1f4bb23-786a-41d3-9b04-9e2cfba9d467" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.749150 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1f4bb23-786a-41d3-9b04-9e2cfba9d467" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: E0127 12:53:30.749219 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c80a52f-5539-4e14-9953-63525b30928d" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.749228 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c80a52f-5539-4e14-9953-63525b30928d" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: E0127 12:53:30.757480 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="275a0058-147e-43c0-b109-5d036591eb61" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.757498 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="275a0058-147e-43c0-b109-5d036591eb61" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: E0127 12:53:30.757531 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca4e1441-8211-48f4-9c31-b0c519574afa" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.757539 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca4e1441-8211-48f4-9c31-b0c519574afa" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.770939 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdcd763e-2447-4a8f-a234-223e5c09708d" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.771012 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="275a0058-147e-43c0-b109-5d036591eb61" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.771037 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca4e1441-8211-48f4-9c31-b0c519574afa" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.771086 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="db4b3937-5f67-437e-a73e-45ef279cd10b" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.771130 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1f4bb23-786a-41d3-9b04-9e2cfba9d467" containerName="mariadb-database-create" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.771169 4900 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="9c80a52f-5539-4e14-9953-63525b30928d" containerName="mariadb-account-create-update" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.779266 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.785532 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.785704 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.786095 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-tjhb5" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.792578 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-hj679"] Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.929305 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-scripts\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.929369 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-config-data\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.930069 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:30 crc kubenswrapper[4900]: I0127 12:53:30.930444 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bcwd\" (UniqueName: \"kubernetes.io/projected/7007f6f8-047f-404d-9094-1ba8d95238a6-kube-api-access-8bcwd\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.033491 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.033635 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bcwd\" (UniqueName: \"kubernetes.io/projected/7007f6f8-047f-404d-9094-1ba8d95238a6-kube-api-access-8bcwd\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 
12:53:31.033726 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-scripts\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.033761 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-config-data\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.041957 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-scripts\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.044102 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.053596 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-config-data\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.058942 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bcwd\" (UniqueName: \"kubernetes.io/projected/7007f6f8-047f-404d-9094-1ba8d95238a6-kube-api-access-8bcwd\") pod \"nova-cell0-conductor-db-sync-hj679\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.138942 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:31 crc kubenswrapper[4900]: I0127 12:53:31.718001 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-hj679"] Jan 27 12:53:32 crc kubenswrapper[4900]: I0127 12:53:32.701169 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-hj679" event={"ID":"7007f6f8-047f-404d-9094-1ba8d95238a6","Type":"ContainerStarted","Data":"a447f585842e5dbf519d8d9c4f96488075c1ae2552560ef943aa2a177bb61a40"} Jan 27 12:53:36 crc kubenswrapper[4900]: I0127 12:53:36.504865 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:53:36 crc kubenswrapper[4900]: E0127 12:53:36.505721 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:53:38 crc kubenswrapper[4900]: I0127 12:53:38.906360 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:38 crc kubenswrapper[4900]: I0127 12:53:38.908661 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="ceilometer-central-agent" containerID="cri-o://ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c" gracePeriod=30 Jan 27 12:53:38 crc kubenswrapper[4900]: I0127 12:53:38.909830 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="sg-core" containerID="cri-o://32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e" gracePeriod=30 Jan 27 12:53:38 crc kubenswrapper[4900]: I0127 12:53:38.910108 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="ceilometer-notification-agent" containerID="cri-o://3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1" gracePeriod=30 Jan 27 12:53:38 crc kubenswrapper[4900]: I0127 12:53:38.910233 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="proxy-httpd" containerID="cri-o://026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc" gracePeriod=30 Jan 27 12:53:38 crc kubenswrapper[4900]: I0127 12:53:38.920456 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.236:3000/\": EOF" Jan 27 12:53:39 crc kubenswrapper[4900]: I0127 12:53:39.811533 4900 generic.go:334] "Generic (PLEG): container finished" podID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerID="026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc" exitCode=0 Jan 27 12:53:39 crc kubenswrapper[4900]: I0127 12:53:39.811881 4900 generic.go:334] "Generic (PLEG): container finished" podID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" 
containerID="32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e" exitCode=2 Jan 27 12:53:39 crc kubenswrapper[4900]: I0127 12:53:39.811891 4900 generic.go:334] "Generic (PLEG): container finished" podID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerID="ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c" exitCode=0 Jan 27 12:53:39 crc kubenswrapper[4900]: I0127 12:53:39.811922 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerDied","Data":"026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc"} Jan 27 12:53:39 crc kubenswrapper[4900]: I0127 12:53:39.811953 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerDied","Data":"32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e"} Jan 27 12:53:39 crc kubenswrapper[4900]: I0127 12:53:39.811964 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerDied","Data":"ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c"} Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.711405 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.773688 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-config-data\") pod \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.777343 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-log-httpd\") pod \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.777547 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-scripts\") pod \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.777744 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-sg-core-conf-yaml\") pod \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.777816 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpmzc\" (UniqueName: \"kubernetes.io/projected/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-kube-api-access-tpmzc\") pod \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.777851 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-combined-ca-bundle\") pod \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " Jan 27 12:53:41 crc 
kubenswrapper[4900]: I0127 12:53:41.777909 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-run-httpd\") pod \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\" (UID: \"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f\") " Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.779908 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" (UID: "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.780513 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" (UID: "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.785256 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-kube-api-access-tpmzc" (OuterVolumeSpecName: "kube-api-access-tpmzc") pod "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" (UID: "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f"). InnerVolumeSpecName "kube-api-access-tpmzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.788191 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-scripts" (OuterVolumeSpecName: "scripts") pod "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" (UID: "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.817876 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" (UID: "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.838562 4900 generic.go:334] "Generic (PLEG): container finished" podID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerID="3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1" exitCode=0 Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.838722 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.842345 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerDied","Data":"3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1"} Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.842442 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f","Type":"ContainerDied","Data":"62026b805d95fa2dfc4213109dc5a1c9827b07637b2c20a3bf78c00fb2424e4b"} Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.842470 4900 scope.go:117] "RemoveContainer" containerID="026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.848721 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-hj679" event={"ID":"7007f6f8-047f-404d-9094-1ba8d95238a6","Type":"ContainerStarted","Data":"8d1022e1c5663d3c9ead4b366419850ad65cbe2406946b46a003d9845f0e9fb4"} Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.873508 4900 scope.go:117] "RemoveContainer" containerID="32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.945304 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-hj679" podStartSLOduration=2.362944645 podStartE2EDuration="11.945276661s" podCreationTimestamp="2026-01-27 12:53:30 +0000 UTC" firstStartedPulling="2026-01-27 12:53:31.728609671 +0000 UTC m=+1638.965637881" lastFinishedPulling="2026-01-27 12:53:41.310941687 +0000 UTC m=+1648.547969897" observedRunningTime="2026-01-27 12:53:41.873675404 +0000 UTC m=+1649.110703624" watchObservedRunningTime="2026-01-27 12:53:41.945276661 +0000 UTC m=+1649.182304871" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.950408 4900 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.950435 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.950446 4900 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.950458 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpmzc\" (UniqueName: \"kubernetes.io/projected/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-kube-api-access-tpmzc\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.950470 4900 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.982139 4900 scope.go:117] "RemoveContainer" containerID="3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.988744 4900 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" (UID: "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:41 crc kubenswrapper[4900]: I0127 12:53:41.992146 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-config-data" (OuterVolumeSpecName: "config-data") pod "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" (UID: "74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.007619 4900 scope.go:117] "RemoveContainer" containerID="ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.035332 4900 scope.go:117] "RemoveContainer" containerID="026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc" Jan 27 12:53:42 crc kubenswrapper[4900]: E0127 12:53:42.036678 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc\": container with ID starting with 026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc not found: ID does not exist" containerID="026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.036756 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc"} err="failed to get container status \"026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc\": rpc error: code = NotFound desc = could not find container \"026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc\": container with ID starting with 026ab122d800efd6d485e4e74ed6155761f4094b4974b86fcd863a60c579d7bc not found: ID does not exist" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.036801 4900 scope.go:117] "RemoveContainer" containerID="32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e" Jan 27 12:53:42 crc kubenswrapper[4900]: E0127 12:53:42.037654 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e\": container with ID starting with 32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e not found: ID does not exist" containerID="32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.037693 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e"} err="failed to get container status \"32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e\": rpc error: code = NotFound desc = could not find container \"32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e\": container with ID starting with 32150186af65a431fd66074f59d235fcb8aeb258d8c318a698c93354530ad44e not found: ID does not exist" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.037715 4900 scope.go:117] "RemoveContainer" 
containerID="3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1" Jan 27 12:53:42 crc kubenswrapper[4900]: E0127 12:53:42.038310 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1\": container with ID starting with 3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1 not found: ID does not exist" containerID="3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.038353 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1"} err="failed to get container status \"3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1\": rpc error: code = NotFound desc = could not find container \"3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1\": container with ID starting with 3d0810c2c9394dc272b4b4a0f9d262064efd3417a86d4e301191823997837ff1 not found: ID does not exist" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.038384 4900 scope.go:117] "RemoveContainer" containerID="ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c" Jan 27 12:53:42 crc kubenswrapper[4900]: E0127 12:53:42.038853 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c\": container with ID starting with ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c not found: ID does not exist" containerID="ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.038890 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c"} err="failed to get container status \"ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c\": rpc error: code = NotFound desc = could not find container \"ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c\": container with ID starting with ba9c6cc7076318e62567f71a22b71102145e396f654e8fbf3839453a0c4d670c not found: ID does not exist" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.053126 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.053171 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.201006 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.213693 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.225512 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:42 crc kubenswrapper[4900]: E0127 12:53:42.226306 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" 
containerName="ceilometer-notification-agent" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.226333 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="ceilometer-notification-agent" Jan 27 12:53:42 crc kubenswrapper[4900]: E0127 12:53:42.226367 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="proxy-httpd" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.226376 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="proxy-httpd" Jan 27 12:53:42 crc kubenswrapper[4900]: E0127 12:53:42.226420 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="ceilometer-central-agent" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.226429 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="ceilometer-central-agent" Jan 27 12:53:42 crc kubenswrapper[4900]: E0127 12:53:42.226456 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="sg-core" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.226464 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="sg-core" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.226783 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="ceilometer-notification-agent" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.226821 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="proxy-httpd" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.226834 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="ceilometer-central-agent" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.226856 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" containerName="sg-core" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.230054 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.233913 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.234202 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.244048 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.258532 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.258842 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-config-data\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.258961 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-log-httpd\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.259052 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxkfh\" (UniqueName: \"kubernetes.io/projected/b4bb3c3d-78b6-46c9-b197-04efff895f36-kube-api-access-dxkfh\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.259271 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-run-httpd\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.259359 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.259502 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-scripts\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.361364 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-config-data\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.361410 
4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-log-httpd\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.361447 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxkfh\" (UniqueName: \"kubernetes.io/projected/b4bb3c3d-78b6-46c9-b197-04efff895f36-kube-api-access-dxkfh\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.361510 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-run-httpd\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.361597 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.361679 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-scripts\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.361777 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.361980 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-run-httpd\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.362110 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-log-httpd\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.368694 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-scripts\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.369235 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-config-data\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.370442 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.382756 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.383899 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxkfh\" (UniqueName: \"kubernetes.io/projected/b4bb3c3d-78b6-46c9-b197-04efff895f36-kube-api-access-dxkfh\") pod \"ceilometer-0\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") " pod="openstack/ceilometer-0" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.500580 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f" path="/var/lib/kubelet/pods/74a4dd06-b3a3-43dd-bd7e-a87c35a5bc1f/volumes" Jan 27 12:53:42 crc kubenswrapper[4900]: I0127 12:53:42.616235 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:53:43 crc kubenswrapper[4900]: W0127 12:53:43.166076 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4bb3c3d_78b6_46c9_b197_04efff895f36.slice/crio-dddbb46aa35825ea36a5356daf0e5ae9658e86950502d53f7ca67e5d9b201c7b WatchSource:0}: Error finding container dddbb46aa35825ea36a5356daf0e5ae9658e86950502d53f7ca67e5d9b201c7b: Status 404 returned error can't find the container with id dddbb46aa35825ea36a5356daf0e5ae9658e86950502d53f7ca67e5d9b201c7b Jan 27 12:53:43 crc kubenswrapper[4900]: I0127 12:53:43.168923 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:53:43 crc kubenswrapper[4900]: I0127 12:53:43.169044 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 12:53:43 crc kubenswrapper[4900]: I0127 12:53:43.929855 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerStarted","Data":"dddbb46aa35825ea36a5356daf0e5ae9658e86950502d53f7ca67e5d9b201c7b"} Jan 27 12:53:44 crc kubenswrapper[4900]: I0127 12:53:44.943577 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerStarted","Data":"c6ba07dc6e560cbde49eed285716318f408ade3993351613835465f47b753adf"} Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.846454 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-cj2cd"] Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.849178 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.863417 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-cj2cd"] Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.917796 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kn7j\" (UniqueName: \"kubernetes.io/projected/74aca284-ea33-4059-9175-c32b2bee89dc-kube-api-access-9kn7j\") pod \"aodh-db-create-cj2cd\" (UID: \"74aca284-ea33-4059-9175-c32b2bee89dc\") " pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.917951 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74aca284-ea33-4059-9175-c32b2bee89dc-operator-scripts\") pod \"aodh-db-create-cj2cd\" (UID: \"74aca284-ea33-4059-9175-c32b2bee89dc\") " pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.918192 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-955f-account-create-update-gvjq7"] Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.919640 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.921752 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Jan 27 12:53:45 crc kubenswrapper[4900]: I0127 12:53:45.945207 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-955f-account-create-update-gvjq7"] Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.021105 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77jz9\" (UniqueName: \"kubernetes.io/projected/65191a52-e21a-4953-acb8-1e524a4bcfa7-kube-api-access-77jz9\") pod \"aodh-955f-account-create-update-gvjq7\" (UID: \"65191a52-e21a-4953-acb8-1e524a4bcfa7\") " pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.021202 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kn7j\" (UniqueName: \"kubernetes.io/projected/74aca284-ea33-4059-9175-c32b2bee89dc-kube-api-access-9kn7j\") pod \"aodh-db-create-cj2cd\" (UID: \"74aca284-ea33-4059-9175-c32b2bee89dc\") " pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.021405 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74aca284-ea33-4059-9175-c32b2bee89dc-operator-scripts\") pod \"aodh-db-create-cj2cd\" (UID: \"74aca284-ea33-4059-9175-c32b2bee89dc\") " pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.021469 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65191a52-e21a-4953-acb8-1e524a4bcfa7-operator-scripts\") pod \"aodh-955f-account-create-update-gvjq7\" (UID: \"65191a52-e21a-4953-acb8-1e524a4bcfa7\") " pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.022920 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/74aca284-ea33-4059-9175-c32b2bee89dc-operator-scripts\") pod \"aodh-db-create-cj2cd\" (UID: \"74aca284-ea33-4059-9175-c32b2bee89dc\") " pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.026332 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerStarted","Data":"ffde071f4e8f2341a93f616a55f75635c6d75edc4c9cec7b14f3d3034c3581fd"} Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.043647 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kn7j\" (UniqueName: \"kubernetes.io/projected/74aca284-ea33-4059-9175-c32b2bee89dc-kube-api-access-9kn7j\") pod \"aodh-db-create-cj2cd\" (UID: \"74aca284-ea33-4059-9175-c32b2bee89dc\") " pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.124673 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65191a52-e21a-4953-acb8-1e524a4bcfa7-operator-scripts\") pod \"aodh-955f-account-create-update-gvjq7\" (UID: \"65191a52-e21a-4953-acb8-1e524a4bcfa7\") " pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.126149 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65191a52-e21a-4953-acb8-1e524a4bcfa7-operator-scripts\") pod \"aodh-955f-account-create-update-gvjq7\" (UID: \"65191a52-e21a-4953-acb8-1e524a4bcfa7\") " pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.128820 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77jz9\" (UniqueName: \"kubernetes.io/projected/65191a52-e21a-4953-acb8-1e524a4bcfa7-kube-api-access-77jz9\") pod \"aodh-955f-account-create-update-gvjq7\" (UID: \"65191a52-e21a-4953-acb8-1e524a4bcfa7\") " pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.136966 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.160806 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77jz9\" (UniqueName: \"kubernetes.io/projected/65191a52-e21a-4953-acb8-1e524a4bcfa7-kube-api-access-77jz9\") pod \"aodh-955f-account-create-update-gvjq7\" (UID: \"65191a52-e21a-4953-acb8-1e524a4bcfa7\") " pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.450501 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:46 crc kubenswrapper[4900]: I0127 12:53:46.820821 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-cj2cd"] Jan 27 12:53:47 crc kubenswrapper[4900]: I0127 12:53:47.071014 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerStarted","Data":"853b9d9b02d1d4fbab8509ee530a89baa7d96fc03c4e0b0675d4ad924177d3fd"} Jan 27 12:53:47 crc kubenswrapper[4900]: I0127 12:53:47.083475 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-cj2cd" event={"ID":"74aca284-ea33-4059-9175-c32b2bee89dc","Type":"ContainerStarted","Data":"f00a2abe1330d7ad449f69da7667f55390fd4006e166ddac020287bece4b364a"} Jan 27 12:53:47 crc kubenswrapper[4900]: I0127 12:53:47.112891 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-cj2cd" podStartSLOduration=2.112867516 podStartE2EDuration="2.112867516s" podCreationTimestamp="2026-01-27 12:53:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:53:47.109049205 +0000 UTC m=+1654.346077415" watchObservedRunningTime="2026-01-27 12:53:47.112867516 +0000 UTC m=+1654.349895726" Jan 27 12:53:47 crc kubenswrapper[4900]: I0127 12:53:47.169818 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-955f-account-create-update-gvjq7"] Jan 27 12:53:48 crc kubenswrapper[4900]: I0127 12:53:48.099492 4900 generic.go:334] "Generic (PLEG): container finished" podID="74aca284-ea33-4059-9175-c32b2bee89dc" containerID="4de009bad480a9183448966c140c5d2899955bb954c2a0f683c730af714203ff" exitCode=0 Jan 27 12:53:48 crc kubenswrapper[4900]: I0127 12:53:48.099574 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-cj2cd" event={"ID":"74aca284-ea33-4059-9175-c32b2bee89dc","Type":"ContainerDied","Data":"4de009bad480a9183448966c140c5d2899955bb954c2a0f683c730af714203ff"} Jan 27 12:53:48 crc kubenswrapper[4900]: I0127 12:53:48.101988 4900 generic.go:334] "Generic (PLEG): container finished" podID="65191a52-e21a-4953-acb8-1e524a4bcfa7" containerID="e7008520d5f3b95fb7d0cdd1ee037fd301dba078b9179618ff5b319557f9a286" exitCode=0 Jan 27 12:53:48 crc kubenswrapper[4900]: I0127 12:53:48.102037 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-955f-account-create-update-gvjq7" event={"ID":"65191a52-e21a-4953-acb8-1e524a4bcfa7","Type":"ContainerDied","Data":"e7008520d5f3b95fb7d0cdd1ee037fd301dba078b9179618ff5b319557f9a286"} Jan 27 12:53:48 crc kubenswrapper[4900]: I0127 12:53:48.102185 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-955f-account-create-update-gvjq7" event={"ID":"65191a52-e21a-4953-acb8-1e524a4bcfa7","Type":"ContainerStarted","Data":"579aed043c0893133990d6a9cb646bd94d287c8b5ac529e3a331b22e5d1e2d8d"} Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.116246 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerStarted","Data":"6a1d2d743d496d79c9c29a8295a4e7afd63532fb0c5fe7b419c850ea92894647"} Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.148049 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.5666815339999998 
podStartE2EDuration="7.14801801s" podCreationTimestamp="2026-01-27 12:53:42 +0000 UTC" firstStartedPulling="2026-01-27 12:53:43.168752737 +0000 UTC m=+1650.405780947" lastFinishedPulling="2026-01-27 12:53:47.750089213 +0000 UTC m=+1654.987117423" observedRunningTime="2026-01-27 12:53:49.142935292 +0000 UTC m=+1656.379963512" watchObservedRunningTime="2026-01-27 12:53:49.14801801 +0000 UTC m=+1656.385046230" Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.780309 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.799869 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.917831 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74aca284-ea33-4059-9175-c32b2bee89dc-operator-scripts\") pod \"74aca284-ea33-4059-9175-c32b2bee89dc\" (UID: \"74aca284-ea33-4059-9175-c32b2bee89dc\") " Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.917898 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77jz9\" (UniqueName: \"kubernetes.io/projected/65191a52-e21a-4953-acb8-1e524a4bcfa7-kube-api-access-77jz9\") pod \"65191a52-e21a-4953-acb8-1e524a4bcfa7\" (UID: \"65191a52-e21a-4953-acb8-1e524a4bcfa7\") " Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.917930 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9kn7j\" (UniqueName: \"kubernetes.io/projected/74aca284-ea33-4059-9175-c32b2bee89dc-kube-api-access-9kn7j\") pod \"74aca284-ea33-4059-9175-c32b2bee89dc\" (UID: \"74aca284-ea33-4059-9175-c32b2bee89dc\") " Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.917966 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65191a52-e21a-4953-acb8-1e524a4bcfa7-operator-scripts\") pod \"65191a52-e21a-4953-acb8-1e524a4bcfa7\" (UID: \"65191a52-e21a-4953-acb8-1e524a4bcfa7\") " Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.918405 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74aca284-ea33-4059-9175-c32b2bee89dc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "74aca284-ea33-4059-9175-c32b2bee89dc" (UID: "74aca284-ea33-4059-9175-c32b2bee89dc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.918833 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65191a52-e21a-4953-acb8-1e524a4bcfa7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "65191a52-e21a-4953-acb8-1e524a4bcfa7" (UID: "65191a52-e21a-4953-acb8-1e524a4bcfa7"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.919914 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74aca284-ea33-4059-9175-c32b2bee89dc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.919952 4900 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65191a52-e21a-4953-acb8-1e524a4bcfa7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.926300 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74aca284-ea33-4059-9175-c32b2bee89dc-kube-api-access-9kn7j" (OuterVolumeSpecName: "kube-api-access-9kn7j") pod "74aca284-ea33-4059-9175-c32b2bee89dc" (UID: "74aca284-ea33-4059-9175-c32b2bee89dc"). InnerVolumeSpecName "kube-api-access-9kn7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:49 crc kubenswrapper[4900]: I0127 12:53:49.926457 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65191a52-e21a-4953-acb8-1e524a4bcfa7-kube-api-access-77jz9" (OuterVolumeSpecName: "kube-api-access-77jz9") pod "65191a52-e21a-4953-acb8-1e524a4bcfa7" (UID: "65191a52-e21a-4953-acb8-1e524a4bcfa7"). InnerVolumeSpecName "kube-api-access-77jz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.022163 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77jz9\" (UniqueName: \"kubernetes.io/projected/65191a52-e21a-4953-acb8-1e524a4bcfa7-kube-api-access-77jz9\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.022202 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9kn7j\" (UniqueName: \"kubernetes.io/projected/74aca284-ea33-4059-9175-c32b2bee89dc-kube-api-access-9kn7j\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.131524 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-955f-account-create-update-gvjq7" Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.132859 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-955f-account-create-update-gvjq7" event={"ID":"65191a52-e21a-4953-acb8-1e524a4bcfa7","Type":"ContainerDied","Data":"579aed043c0893133990d6a9cb646bd94d287c8b5ac529e3a331b22e5d1e2d8d"} Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.132926 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="579aed043c0893133990d6a9cb646bd94d287c8b5ac529e3a331b22e5d1e2d8d" Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.135962 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-cj2cd" event={"ID":"74aca284-ea33-4059-9175-c32b2bee89dc","Type":"ContainerDied","Data":"f00a2abe1330d7ad449f69da7667f55390fd4006e166ddac020287bece4b364a"} Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.136001 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f00a2abe1330d7ad449f69da7667f55390fd4006e166ddac020287bece4b364a" Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.136021 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-cj2cd" Jan 27 12:53:50 crc kubenswrapper[4900]: I0127 12:53:50.136196 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.484819 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-vbjbt"] Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.485332 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:53:51 crc kubenswrapper[4900]: E0127 12:53:51.485587 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65191a52-e21a-4953-acb8-1e524a4bcfa7" containerName="mariadb-account-create-update" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.485638 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="65191a52-e21a-4953-acb8-1e524a4bcfa7" containerName="mariadb-account-create-update" Jan 27 12:53:51 crc kubenswrapper[4900]: E0127 12:53:51.485678 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74aca284-ea33-4059-9175-c32b2bee89dc" containerName="mariadb-database-create" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.485701 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="74aca284-ea33-4059-9175-c32b2bee89dc" containerName="mariadb-database-create" Jan 27 12:53:51 crc kubenswrapper[4900]: E0127 12:53:51.485695 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.486352 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="74aca284-ea33-4059-9175-c32b2bee89dc" containerName="mariadb-database-create" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.486386 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="65191a52-e21a-4953-acb8-1e524a4bcfa7" containerName="mariadb-account-create-update" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.487673 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.499873 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.500105 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-kgljt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.500362 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.500683 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.528122 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-vbjbt"] Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.613740 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chbsn\" (UniqueName: \"kubernetes.io/projected/e22dfd1e-8efb-4309-a859-79f256e6eb78-kube-api-access-chbsn\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.613848 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-scripts\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.613963 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-combined-ca-bundle\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.614033 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-config-data\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.715933 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-combined-ca-bundle\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.716037 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-config-data\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.716205 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chbsn\" (UniqueName: \"kubernetes.io/projected/e22dfd1e-8efb-4309-a859-79f256e6eb78-kube-api-access-chbsn\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc 
kubenswrapper[4900]: I0127 12:53:51.716327 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-scripts\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.725912 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-config-data\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.735367 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-scripts\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.735659 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-combined-ca-bundle\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.740198 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chbsn\" (UniqueName: \"kubernetes.io/projected/e22dfd1e-8efb-4309-a859-79f256e6eb78-kube-api-access-chbsn\") pod \"aodh-db-sync-vbjbt\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:51 crc kubenswrapper[4900]: I0127 12:53:51.815889 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:53:52 crc kubenswrapper[4900]: I0127 12:53:52.345122 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-vbjbt"] Jan 27 12:53:53 crc kubenswrapper[4900]: I0127 12:53:53.188422 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-vbjbt" event={"ID":"e22dfd1e-8efb-4309-a859-79f256e6eb78","Type":"ContainerStarted","Data":"51cb7ad0a47afeae39be4aa998ea08a308c9d3677c7943c080bd2adef458b73b"} Jan 27 12:53:57 crc kubenswrapper[4900]: I0127 12:53:57.281734 4900 generic.go:334] "Generic (PLEG): container finished" podID="7007f6f8-047f-404d-9094-1ba8d95238a6" containerID="8d1022e1c5663d3c9ead4b366419850ad65cbe2406946b46a003d9845f0e9fb4" exitCode=0 Jan 27 12:53:57 crc kubenswrapper[4900]: I0127 12:53:57.282205 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-hj679" event={"ID":"7007f6f8-047f-404d-9094-1ba8d95238a6","Type":"ContainerDied","Data":"8d1022e1c5663d3c9ead4b366419850ad65cbe2406946b46a003d9845f0e9fb4"} Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.297780 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-vbjbt" event={"ID":"e22dfd1e-8efb-4309-a859-79f256e6eb78","Type":"ContainerStarted","Data":"e90e71088358793e98d3671c3bacb75ced3fd1e80b860cf1140887d9c6e44be8"} Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.337074 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-vbjbt" podStartSLOduration=2.339826404 podStartE2EDuration="7.337015695s" podCreationTimestamp="2026-01-27 12:53:51 +0000 UTC" firstStartedPulling="2026-01-27 12:53:52.363111177 +0000 UTC m=+1659.600139387" lastFinishedPulling="2026-01-27 12:53:57.360300468 +0000 UTC m=+1664.597328678" observedRunningTime="2026-01-27 12:53:58.322247496 +0000 UTC m=+1665.559275706" watchObservedRunningTime="2026-01-27 12:53:58.337015695 +0000 UTC m=+1665.574043905" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.756647 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.809348 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-scripts\") pod \"7007f6f8-047f-404d-9094-1ba8d95238a6\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.809650 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-combined-ca-bundle\") pod \"7007f6f8-047f-404d-9094-1ba8d95238a6\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.809686 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-config-data\") pod \"7007f6f8-047f-404d-9094-1ba8d95238a6\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.809801 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bcwd\" (UniqueName: \"kubernetes.io/projected/7007f6f8-047f-404d-9094-1ba8d95238a6-kube-api-access-8bcwd\") pod \"7007f6f8-047f-404d-9094-1ba8d95238a6\" (UID: \"7007f6f8-047f-404d-9094-1ba8d95238a6\") " Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.816416 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-scripts" (OuterVolumeSpecName: "scripts") pod "7007f6f8-047f-404d-9094-1ba8d95238a6" (UID: "7007f6f8-047f-404d-9094-1ba8d95238a6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.816421 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7007f6f8-047f-404d-9094-1ba8d95238a6-kube-api-access-8bcwd" (OuterVolumeSpecName: "kube-api-access-8bcwd") pod "7007f6f8-047f-404d-9094-1ba8d95238a6" (UID: "7007f6f8-047f-404d-9094-1ba8d95238a6"). InnerVolumeSpecName "kube-api-access-8bcwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.843996 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-config-data" (OuterVolumeSpecName: "config-data") pod "7007f6f8-047f-404d-9094-1ba8d95238a6" (UID: "7007f6f8-047f-404d-9094-1ba8d95238a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.844763 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7007f6f8-047f-404d-9094-1ba8d95238a6" (UID: "7007f6f8-047f-404d-9094-1ba8d95238a6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.913567 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.913953 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.913972 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7007f6f8-047f-404d-9094-1ba8d95238a6-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:58 crc kubenswrapper[4900]: I0127 12:53:58.913985 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bcwd\" (UniqueName: \"kubernetes.io/projected/7007f6f8-047f-404d-9094-1ba8d95238a6-kube-api-access-8bcwd\") on node \"crc\" DevicePath \"\"" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.342140 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-hj679" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.342135 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-hj679" event={"ID":"7007f6f8-047f-404d-9094-1ba8d95238a6","Type":"ContainerDied","Data":"a447f585842e5dbf519d8d9c4f96488075c1ae2552560ef943aa2a177bb61a40"} Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.342235 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a447f585842e5dbf519d8d9c4f96488075c1ae2552560ef943aa2a177bb61a40" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.592914 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 27 12:53:59 crc kubenswrapper[4900]: E0127 12:53:59.593831 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7007f6f8-047f-404d-9094-1ba8d95238a6" containerName="nova-cell0-conductor-db-sync" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.593851 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7007f6f8-047f-404d-9094-1ba8d95238a6" containerName="nova-cell0-conductor-db-sync" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.594150 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="7007f6f8-047f-404d-9094-1ba8d95238a6" containerName="nova-cell0-conductor-db-sync" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.595078 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.601186 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-tjhb5" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.601525 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.619796 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.744620 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/752097c1-958f-4f9b-868d-e8ec0136da53-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.744983 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctswf\" (UniqueName: \"kubernetes.io/projected/752097c1-958f-4f9b-868d-e8ec0136da53-kube-api-access-ctswf\") pod \"nova-cell0-conductor-0\" (UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.745251 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/752097c1-958f-4f9b-868d-e8ec0136da53-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.847294 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/752097c1-958f-4f9b-868d-e8ec0136da53-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.847367 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/752097c1-958f-4f9b-868d-e8ec0136da53-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.847475 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctswf\" (UniqueName: \"kubernetes.io/projected/752097c1-958f-4f9b-868d-e8ec0136da53-kube-api-access-ctswf\") pod \"nova-cell0-conductor-0\" (UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.851744 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/752097c1-958f-4f9b-868d-e8ec0136da53-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.851940 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/752097c1-958f-4f9b-868d-e8ec0136da53-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.864521 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctswf\" (UniqueName: \"kubernetes.io/projected/752097c1-958f-4f9b-868d-e8ec0136da53-kube-api-access-ctswf\") pod \"nova-cell0-conductor-0\" (UID: \"752097c1-958f-4f9b-868d-e8ec0136da53\") " pod="openstack/nova-cell0-conductor-0" Jan 27 12:53:59 crc kubenswrapper[4900]: I0127 12:53:59.959019 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 27 12:54:00 crc kubenswrapper[4900]: I0127 12:54:00.357178 4900 generic.go:334] "Generic (PLEG): container finished" podID="e22dfd1e-8efb-4309-a859-79f256e6eb78" containerID="e90e71088358793e98d3671c3bacb75ced3fd1e80b860cf1140887d9c6e44be8" exitCode=0 Jan 27 12:54:00 crc kubenswrapper[4900]: I0127 12:54:00.357268 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-vbjbt" event={"ID":"e22dfd1e-8efb-4309-a859-79f256e6eb78","Type":"ContainerDied","Data":"e90e71088358793e98d3671c3bacb75ced3fd1e80b860cf1140887d9c6e44be8"} Jan 27 12:54:00 crc kubenswrapper[4900]: I0127 12:54:00.512129 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.371833 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"752097c1-958f-4f9b-868d-e8ec0136da53","Type":"ContainerStarted","Data":"39fa46037061f9a2304e7e3db102c3bd2f00a89a48f6a0027b16c5ecee8d470e"} Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.372220 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"752097c1-958f-4f9b-868d-e8ec0136da53","Type":"ContainerStarted","Data":"32633be5c04a061e075653ca98ba2d12f1e04e165fb454981db89a33e67afa85"} Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.395685 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.395663493 podStartE2EDuration="2.395663493s" podCreationTimestamp="2026-01-27 12:53:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:01.393727787 +0000 UTC m=+1668.630755997" watchObservedRunningTime="2026-01-27 12:54:01.395663493 +0000 UTC m=+1668.632691693" Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.841374 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.929811 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-config-data\") pod \"e22dfd1e-8efb-4309-a859-79f256e6eb78\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.930009 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chbsn\" (UniqueName: \"kubernetes.io/projected/e22dfd1e-8efb-4309-a859-79f256e6eb78-kube-api-access-chbsn\") pod \"e22dfd1e-8efb-4309-a859-79f256e6eb78\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.930106 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-combined-ca-bundle\") pod \"e22dfd1e-8efb-4309-a859-79f256e6eb78\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.930192 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-scripts\") pod \"e22dfd1e-8efb-4309-a859-79f256e6eb78\" (UID: \"e22dfd1e-8efb-4309-a859-79f256e6eb78\") " Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.938045 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-scripts" (OuterVolumeSpecName: "scripts") pod "e22dfd1e-8efb-4309-a859-79f256e6eb78" (UID: "e22dfd1e-8efb-4309-a859-79f256e6eb78"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.938549 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e22dfd1e-8efb-4309-a859-79f256e6eb78-kube-api-access-chbsn" (OuterVolumeSpecName: "kube-api-access-chbsn") pod "e22dfd1e-8efb-4309-a859-79f256e6eb78" (UID: "e22dfd1e-8efb-4309-a859-79f256e6eb78"). InnerVolumeSpecName "kube-api-access-chbsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.968461 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-config-data" (OuterVolumeSpecName: "config-data") pod "e22dfd1e-8efb-4309-a859-79f256e6eb78" (UID: "e22dfd1e-8efb-4309-a859-79f256e6eb78"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:01 crc kubenswrapper[4900]: I0127 12:54:01.981377 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e22dfd1e-8efb-4309-a859-79f256e6eb78" (UID: "e22dfd1e-8efb-4309-a859-79f256e6eb78"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:02 crc kubenswrapper[4900]: I0127 12:54:02.032853 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:02 crc kubenswrapper[4900]: I0127 12:54:02.032897 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:02 crc kubenswrapper[4900]: I0127 12:54:02.032912 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chbsn\" (UniqueName: \"kubernetes.io/projected/e22dfd1e-8efb-4309-a859-79f256e6eb78-kube-api-access-chbsn\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:02 crc kubenswrapper[4900]: I0127 12:54:02.032926 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e22dfd1e-8efb-4309-a859-79f256e6eb78-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:02 crc kubenswrapper[4900]: I0127 12:54:02.385833 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-vbjbt" event={"ID":"e22dfd1e-8efb-4309-a859-79f256e6eb78","Type":"ContainerDied","Data":"51cb7ad0a47afeae39be4aa998ea08a308c9d3677c7943c080bd2adef458b73b"} Jan 27 12:54:02 crc kubenswrapper[4900]: I0127 12:54:02.385886 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51cb7ad0a47afeae39be4aa998ea08a308c9d3677c7943c080bd2adef458b73b" Jan 27 12:54:02 crc kubenswrapper[4900]: I0127 12:54:02.385899 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-vbjbt" Jan 27 12:54:02 crc kubenswrapper[4900]: I0127 12:54:02.387523 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 27 12:54:03 crc kubenswrapper[4900]: I0127 12:54:03.482492 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:54:03 crc kubenswrapper[4900]: E0127 12:54:03.483674 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.602235 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 27 12:54:06 crc kubenswrapper[4900]: E0127 12:54:06.603284 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e22dfd1e-8efb-4309-a859-79f256e6eb78" containerName="aodh-db-sync" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.603299 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e22dfd1e-8efb-4309-a859-79f256e6eb78" containerName="aodh-db-sync" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.603584 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="e22dfd1e-8efb-4309-a859-79f256e6eb78" containerName="aodh-db-sync" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.606654 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.612219 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.612348 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-kgljt" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.612473 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.645310 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.742265 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-scripts\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.742599 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.742642 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhf7g\" (UniqueName: \"kubernetes.io/projected/182248e0-13c8-4d04-bfd2-f15598f4820f-kube-api-access-lhf7g\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.742670 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-config-data\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.845830 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-scripts\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.845916 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.845958 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhf7g\" (UniqueName: \"kubernetes.io/projected/182248e0-13c8-4d04-bfd2-f15598f4820f-kube-api-access-lhf7g\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.845997 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-config-data\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: 
I0127 12:54:06.862723 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-config-data\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.866984 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-scripts\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.871866 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.880043 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhf7g\" (UniqueName: \"kubernetes.io/projected/182248e0-13c8-4d04-bfd2-f15598f4820f-kube-api-access-lhf7g\") pod \"aodh-0\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " pod="openstack/aodh-0" Jan 27 12:54:06 crc kubenswrapper[4900]: I0127 12:54:06.937958 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 27 12:54:07 crc kubenswrapper[4900]: W0127 12:54:07.560310 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod182248e0_13c8_4d04_bfd2_f15598f4820f.slice/crio-a9bc195e68284cefbbebac2043aeb7d03c3f297288a8ae5a1bf7bf0d25c9492e WatchSource:0}: Error finding container a9bc195e68284cefbbebac2043aeb7d03c3f297288a8ae5a1bf7bf0d25c9492e: Status 404 returned error can't find the container with id a9bc195e68284cefbbebac2043aeb7d03c3f297288a8ae5a1bf7bf0d25c9492e Jan 27 12:54:07 crc kubenswrapper[4900]: I0127 12:54:07.590922 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 27 12:54:08 crc kubenswrapper[4900]: I0127 12:54:08.458920 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerStarted","Data":"a9bc195e68284cefbbebac2043aeb7d03c3f297288a8ae5a1bf7bf0d25c9492e"} Jan 27 12:54:09 crc kubenswrapper[4900]: I0127 12:54:09.476038 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerStarted","Data":"9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502"} Jan 27 12:54:09 crc kubenswrapper[4900]: I0127 12:54:09.820198 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:54:09 crc kubenswrapper[4900]: I0127 12:54:09.820580 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="ceilometer-central-agent" containerID="cri-o://c6ba07dc6e560cbde49eed285716318f408ade3993351613835465f47b753adf" gracePeriod=30 Jan 27 12:54:09 crc kubenswrapper[4900]: I0127 12:54:09.821125 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="proxy-httpd" 
containerID="cri-o://6a1d2d743d496d79c9c29a8295a4e7afd63532fb0c5fe7b419c850ea92894647" gracePeriod=30 Jan 27 12:54:09 crc kubenswrapper[4900]: I0127 12:54:09.821210 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="sg-core" containerID="cri-o://853b9d9b02d1d4fbab8509ee530a89baa7d96fc03c4e0b0675d4ad924177d3fd" gracePeriod=30 Jan 27 12:54:09 crc kubenswrapper[4900]: I0127 12:54:09.821260 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="ceilometer-notification-agent" containerID="cri-o://ffde071f4e8f2341a93f616a55f75635c6d75edc4c9cec7b14f3d3034c3581fd" gracePeriod=30 Jan 27 12:54:09 crc kubenswrapper[4900]: I0127 12:54:09.835067 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.238:3000/\": EOF" Jan 27 12:54:10 crc kubenswrapper[4900]: I0127 12:54:10.010825 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 27 12:54:10 crc kubenswrapper[4900]: I0127 12:54:10.490074 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerID="6a1d2d743d496d79c9c29a8295a4e7afd63532fb0c5fe7b419c850ea92894647" exitCode=0 Jan 27 12:54:10 crc kubenswrapper[4900]: I0127 12:54:10.490496 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerID="853b9d9b02d1d4fbab8509ee530a89baa7d96fc03c4e0b0675d4ad924177d3fd" exitCode=2 Jan 27 12:54:10 crc kubenswrapper[4900]: I0127 12:54:10.498886 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerDied","Data":"6a1d2d743d496d79c9c29a8295a4e7afd63532fb0c5fe7b419c850ea92894647"} Jan 27 12:54:10 crc kubenswrapper[4900]: I0127 12:54:10.498963 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerDied","Data":"853b9d9b02d1d4fbab8509ee530a89baa7d96fc03c4e0b0675d4ad924177d3fd"} Jan 27 12:54:10 crc kubenswrapper[4900]: I0127 12:54:10.573921 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.144111 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-tp8r6"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.147753 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.151678 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.151923 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.168966 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-tp8r6"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.174440 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-scripts\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.179528 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.180176 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-885kl\" (UniqueName: \"kubernetes.io/projected/eff943df-8066-483c-a0c7-0fb3f7346380-kube-api-access-885kl\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.180400 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-config-data\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.298878 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-885kl\" (UniqueName: \"kubernetes.io/projected/eff943df-8066-483c-a0c7-0fb3f7346380-kube-api-access-885kl\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.298993 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-config-data\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.299156 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-scripts\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.299328 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.305769 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-scripts\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.307253 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.330164 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-config-data\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.361730 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-885kl\" (UniqueName: \"kubernetes.io/projected/eff943df-8066-483c-a0c7-0fb3f7346380-kube-api-access-885kl\") pod \"nova-cell0-cell-mapping-tp8r6\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.412979 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.441305 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.444533 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.445410 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.470291 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.503207 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.503802 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34251519-108d-402e-9848-301fa422c83f-logs\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.503843 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsmfl\" (UniqueName: \"kubernetes.io/projected/34251519-108d-402e-9848-301fa422c83f-kube-api-access-qsmfl\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.503949 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-config-data\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.548290 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.550736 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.556680 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.590101 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerID="c6ba07dc6e560cbde49eed285716318f408ade3993351613835465f47b753adf" exitCode=0 Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.590444 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.595751 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerDied","Data":"c6ba07dc6e560cbde49eed285716318f408ade3993351613835465f47b753adf"} Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.596325 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.606140 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34251519-108d-402e-9848-301fa422c83f-logs\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.606183 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsmfl\" (UniqueName: \"kubernetes.io/projected/34251519-108d-402e-9848-301fa422c83f-kube-api-access-qsmfl\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.606328 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ca41e27-112f-461c-9556-768eea0cbdf6-logs\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.606360 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-config-data\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.606477 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.606548 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.606663 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-config-data\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.606701 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4crnr\" (UniqueName: \"kubernetes.io/projected/6ca41e27-112f-461c-9556-768eea0cbdf6-kube-api-access-4crnr\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.614663 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34251519-108d-402e-9848-301fa422c83f-logs\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.621655 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.621986 
4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.632933 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-config-data\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.631532 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerStarted","Data":"51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66"} Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.638921 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.640799 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.671857 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsmfl\" (UniqueName: \"kubernetes.io/projected/34251519-108d-402e-9848-301fa422c83f-kube-api-access-qsmfl\") pod \"nova-api-0\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.714081 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-config-data\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.714145 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xk2t\" (UniqueName: \"kubernetes.io/projected/73158450-c56d-45ec-a637-d46c435e81a0-kube-api-access-2xk2t\") pod \"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.714175 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4crnr\" (UniqueName: \"kubernetes.io/projected/6ca41e27-112f-461c-9556-768eea0cbdf6-kube-api-access-4crnr\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.714312 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ca41e27-112f-461c-9556-768eea0cbdf6-logs\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.714358 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.714411 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.714452 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.715950 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ca41e27-112f-461c-9556-768eea0cbdf6-logs\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.747005 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.763734 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-config-data\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.767408 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.768902 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.771778 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.781510 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4crnr\" (UniqueName: \"kubernetes.io/projected/6ca41e27-112f-461c-9556-768eea0cbdf6-kube-api-access-4crnr\") pod \"nova-metadata-0\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.788532 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-np784"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.791381 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.792078 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.817477 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.817551 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq7t6\" (UniqueName: \"kubernetes.io/projected/7c07718a-743c-4b97-bd07-ff1192928e8f-kube-api-access-wq7t6\") pod \"nova-scheduler-0\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.817614 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.817646 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-config-data\") pod \"nova-scheduler-0\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.817765 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xk2t\" (UniqueName: \"kubernetes.io/projected/73158450-c56d-45ec-a637-d46c435e81a0-kube-api-access-2xk2t\") pod \"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.817803 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.819364 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.825006 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.838548 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.842303 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xk2t\" (UniqueName: \"kubernetes.io/projected/73158450-c56d-45ec-a637-d46c435e81a0-kube-api-access-2xk2t\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.853642 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-np784"] Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.925327 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.927633 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.928286 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.928362 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-config\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.928418 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.928442 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.928459 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkzzv\" (UniqueName: \"kubernetes.io/projected/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-kube-api-access-lkzzv\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.928496 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq7t6\" (UniqueName: \"kubernetes.io/projected/7c07718a-743c-4b97-bd07-ff1192928e8f-kube-api-access-wq7t6\") pod \"nova-scheduler-0\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.928567 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-config-data\") pod \"nova-scheduler-0\" (UID: 
\"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.928612 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.931718 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.932149 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-config-data\") pod \"nova-scheduler-0\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.957884 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq7t6\" (UniqueName: \"kubernetes.io/projected/7c07718a-743c-4b97-bd07-ff1192928e8f-kube-api-access-wq7t6\") pod \"nova-scheduler-0\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:11 crc kubenswrapper[4900]: I0127 12:54:11.991044 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.008505 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.030943 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.031088 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-config\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.031151 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.031182 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.031198 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkzzv\" (UniqueName: \"kubernetes.io/projected/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-kube-api-access-lkzzv\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.031355 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.032655 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.032938 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.033326 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc 
kubenswrapper[4900]: I0127 12:54:12.033861 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.034441 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-config\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.082974 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkzzv\" (UniqueName: \"kubernetes.io/projected/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-kube-api-access-lkzzv\") pod \"dnsmasq-dns-5fbc4d444f-np784\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.320863 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.626412 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.238:3000/\": dial tcp 10.217.0.238:3000: connect: connection refused" Jan 27 12:54:12 crc kubenswrapper[4900]: W0127 12:54:12.850446 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeff943df_8066_483c_a0c7_0fb3f7346380.slice/crio-3dd40f5c799a1cd9efd1e857050671f2e604dfb6e06ed89e6390aa5e70c45b9c WatchSource:0}: Error finding container 3dd40f5c799a1cd9efd1e857050671f2e604dfb6e06ed89e6390aa5e70c45b9c: Status 404 returned error can't find the container with id 3dd40f5c799a1cd9efd1e857050671f2e604dfb6e06ed89e6390aa5e70c45b9c Jan 27 12:54:12 crc kubenswrapper[4900]: I0127 12:54:12.894200 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-tp8r6"] Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.184029 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.661972 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.676306 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.749932 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-np784"] Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.761868 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.862776 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"34251519-108d-402e-9848-301fa422c83f","Type":"ContainerStarted","Data":"687846f04bc576dadd1e347e8989a8642fd4b388aec9c58c6c2d3cf44d8529f5"} Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.879423 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-cell-mapping-tp8r6" event={"ID":"eff943df-8066-483c-a0c7-0fb3f7346380","Type":"ContainerStarted","Data":"a6f20a452a2479b3b0a8ab5d857f98014bff4d463aea096cf23ede2b3b90a24f"} Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.879909 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tp8r6" event={"ID":"eff943df-8066-483c-a0c7-0fb3f7346380","Type":"ContainerStarted","Data":"3dd40f5c799a1cd9efd1e857050671f2e604dfb6e06ed89e6390aa5e70c45b9c"} Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.888455 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"73158450-c56d-45ec-a637-d46c435e81a0","Type":"ContainerStarted","Data":"6a7752382091b366cbbcb0b44f0c38471593c1983a7cc52cd122608a92567e3e"} Jan 27 12:54:13 crc kubenswrapper[4900]: I0127 12:54:13.897009 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c07718a-743c-4b97-bd07-ff1192928e8f","Type":"ContainerStarted","Data":"648763f2746f5423f00c6ff2c552733604ace1b076c6a1c477988eb16718115f"} Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.519020 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-tp8r6" podStartSLOduration=3.5189966630000002 podStartE2EDuration="3.518996663s" podCreationTimestamp="2026-01-27 12:54:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:13.915449882 +0000 UTC m=+1681.152478092" watchObservedRunningTime="2026-01-27 12:54:14.518996663 +0000 UTC m=+1681.756024873" Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.522350 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-cjbb2"] Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.524368 4900 util.go:30] "No sandbox for pod can be found. 
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.524368 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.531697 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.531984 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.556048 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-cjbb2"]
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.671661 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-scripts\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.672195 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-config-data\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.672401 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28vzm\" (UniqueName: \"kubernetes.io/projected/c0c2f998-53fe-4850-a061-6f275bb39313-kube-api-access-28vzm\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.672552 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.792639 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28vzm\" (UniqueName: \"kubernetes.io/projected/c0c2f998-53fe-4850-a061-6f275bb39313-kube-api-access-28vzm\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.792739 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.792928 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-scripts\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.792958 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-config-data\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.802902 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-config-data\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.814028 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.822793 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-scripts\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.824043 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28vzm\" (UniqueName: \"kubernetes.io/projected/c0c2f998-53fe-4850-a061-6f275bb39313-kube-api-access-28vzm\") pod \"nova-cell1-conductor-db-sync-cjbb2\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.895740 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-cjbb2"
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.935782 4900 generic.go:334] "Generic (PLEG): container finished" podID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" containerID="3a84864f7e482b6a687b22c1f96dd8e49262950c68e7377d7fa998effcacf94d" exitCode=0
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.935871 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" event={"ID":"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc","Type":"ContainerDied","Data":"3a84864f7e482b6a687b22c1f96dd8e49262950c68e7377d7fa998effcacf94d"}
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.935899 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" event={"ID":"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc","Type":"ContainerStarted","Data":"a354d11ef1d81bc57e38d77d86c28d945d09a1fb6670012708698e9567c3e22a"}
Jan 27 12:54:14 crc kubenswrapper[4900]: I0127 12:54:14.941101 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ca41e27-112f-461c-9556-768eea0cbdf6","Type":"ContainerStarted","Data":"28d9a70441d45d1d4866b522fe61e1b1691fa725b2001f0365f19f3aac8950e3"}
Jan 27 12:54:15 crc kubenswrapper[4900]: I0127 12:54:15.494456 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876"
Jan 27 12:54:15 crc kubenswrapper[4900]: E0127 12:54:15.494955 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 12:54:15 crc kubenswrapper[4900]: I0127 12:54:15.962489 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerID="ffde071f4e8f2341a93f616a55f75635c6d75edc4c9cec7b14f3d3034c3581fd" exitCode=0
Jan 27 12:54:15 crc kubenswrapper[4900]: I0127 12:54:15.962590 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerDied","Data":"ffde071f4e8f2341a93f616a55f75635c6d75edc4c9cec7b14f3d3034c3581fd"}
Jan 27 12:54:16 crc kubenswrapper[4900]: I0127 12:54:16.592599 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 27 12:54:16 crc kubenswrapper[4900]: I0127 12:54:16.655074 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
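
The pod_workers error above ("back-off 5m0s restarting failed container") is the kubelet's crash-loop backoff: each failed restart of machine-config-daemon pushes the next StartContainer attempt further out, until the delay saturates at the 5m0s quoted in the message and the pod shows as CrashLoopBackOff. A sketch of such a doubling schedule; the 10s base delay is an assumption for illustration, only the 5m cap is visible in the log:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Assumed base delay; the log only shows the 5m0s cap.
        delay, maxDelay := 10*time.Second, 5*time.Minute
        for attempt := 1; delay < maxDelay; attempt++ {
            fmt.Printf("restart attempt %d delayed by %v\n", attempt, delay)
            delay *= 2
        }
        fmt.Println("all later restarts delayed by", maxDelay)
    }
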
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.833985 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.994905 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-log-httpd\") pod \"b4bb3c3d-78b6-46c9-b197-04efff895f36\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") "
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.995013 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-combined-ca-bundle\") pod \"b4bb3c3d-78b6-46c9-b197-04efff895f36\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") "
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.995131 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-sg-core-conf-yaml\") pod \"b4bb3c3d-78b6-46c9-b197-04efff895f36\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") "
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.996893 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-config-data\") pod \"b4bb3c3d-78b6-46c9-b197-04efff895f36\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") "
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.997024 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-scripts\") pod \"b4bb3c3d-78b6-46c9-b197-04efff895f36\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") "
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.997100 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dxkfh\" (UniqueName: \"kubernetes.io/projected/b4bb3c3d-78b6-46c9-b197-04efff895f36-kube-api-access-dxkfh\") pod \"b4bb3c3d-78b6-46c9-b197-04efff895f36\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") "
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.997131 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-run-httpd\") pod \"b4bb3c3d-78b6-46c9-b197-04efff895f36\" (UID: \"b4bb3c3d-78b6-46c9-b197-04efff895f36\") "
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.997540 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b4bb3c3d-78b6-46c9-b197-04efff895f36" (UID: "b4bb3c3d-78b6-46c9-b197-04efff895f36"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.999339 4900 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 27 12:54:17 crc kubenswrapper[4900]: I0127 12:54:17.999531 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b4bb3c3d-78b6-46c9-b197-04efff895f36" (UID: "b4bb3c3d-78b6-46c9-b197-04efff895f36").
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.002934 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-scripts" (OuterVolumeSpecName: "scripts") pod "b4bb3c3d-78b6-46c9-b197-04efff895f36" (UID: "b4bb3c3d-78b6-46c9-b197-04efff895f36"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.016845 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4bb3c3d-78b6-46c9-b197-04efff895f36-kube-api-access-dxkfh" (OuterVolumeSpecName: "kube-api-access-dxkfh") pod "b4bb3c3d-78b6-46c9-b197-04efff895f36" (UID: "b4bb3c3d-78b6-46c9-b197-04efff895f36"). InnerVolumeSpecName "kube-api-access-dxkfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.054908 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4bb3c3d-78b6-46c9-b197-04efff895f36","Type":"ContainerDied","Data":"dddbb46aa35825ea36a5356daf0e5ae9658e86950502d53f7ca67e5d9b201c7b"} Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.055007 4900 scope.go:117] "RemoveContainer" containerID="6a1d2d743d496d79c9c29a8295a4e7afd63532fb0c5fe7b419c850ea92894647" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.055926 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.057238 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b4bb3c3d-78b6-46c9-b197-04efff895f36" (UID: "b4bb3c3d-78b6-46c9-b197-04efff895f36"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.109999 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.110076 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dxkfh\" (UniqueName: \"kubernetes.io/projected/b4bb3c3d-78b6-46c9-b197-04efff895f36-kube-api-access-dxkfh\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.110099 4900 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4bb3c3d-78b6-46c9-b197-04efff895f36-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.110117 4900 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.172477 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4bb3c3d-78b6-46c9-b197-04efff895f36" (UID: "b4bb3c3d-78b6-46c9-b197-04efff895f36"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.200176 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-config-data" (OuterVolumeSpecName: "config-data") pod "b4bb3c3d-78b6-46c9-b197-04efff895f36" (UID: "b4bb3c3d-78b6-46c9-b197-04efff895f36"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.214186 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.214232 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4bb3c3d-78b6-46c9-b197-04efff895f36-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.427103 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-cjbb2"] Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.464259 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.531484 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.545137 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:54:18 crc kubenswrapper[4900]: E0127 12:54:18.545952 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="ceilometer-notification-agent" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.545975 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="ceilometer-notification-agent" Jan 27 12:54:18 crc kubenswrapper[4900]: E0127 12:54:18.545995 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="sg-core" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.546003 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="sg-core" Jan 27 12:54:18 crc kubenswrapper[4900]: E0127 12:54:18.546020 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="ceilometer-central-agent" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.546027 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="ceilometer-central-agent" Jan 27 12:54:18 crc kubenswrapper[4900]: E0127 12:54:18.546083 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="proxy-httpd" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.546091 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="proxy-httpd" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.546358 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="ceilometer-central-agent" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.546389 4900 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="ceilometer-notification-agent" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.546408 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="sg-core" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.546419 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" containerName="proxy-httpd" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.548955 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.561548 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.561945 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.603498 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.658927 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-log-httpd\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.659115 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4p8l\" (UniqueName: \"kubernetes.io/projected/00248c3f-554f-4360-aad4-9dd642eccf99-kube-api-access-q4p8l\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.659245 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-config-data\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.659613 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.659679 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-scripts\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.659738 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.659826 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-run-httpd\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.762686 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4p8l\" (UniqueName: \"kubernetes.io/projected/00248c3f-554f-4360-aad4-9dd642eccf99-kube-api-access-q4p8l\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.762783 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-config-data\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.762898 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.762927 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-scripts\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.762956 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.762991 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-run-httpd\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.763099 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-log-httpd\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.763651 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-log-httpd\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.763895 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-run-httpd\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.769254 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-scripts\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.775021 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-config-data\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.778158 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.781714 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.785960 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4p8l\" (UniqueName: \"kubernetes.io/projected/00248c3f-554f-4360-aad4-9dd642eccf99-kube-api-access-q4p8l\") pod \"ceilometer-0\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " pod="openstack/ceilometer-0" Jan 27 12:54:18 crc kubenswrapper[4900]: I0127 12:54:18.994520 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:54:19 crc kubenswrapper[4900]: W0127 12:54:19.303001 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0c2f998_53fe_4850_a061_6f275bb39313.slice/crio-d5534d761c11edb0725424babdce745732349ab47255ee7ed85c24c60a913426 WatchSource:0}: Error finding container d5534d761c11edb0725424babdce745732349ab47255ee7ed85c24c60a913426: Status 404 returned error can't find the container with id d5534d761c11edb0725424babdce745732349ab47255ee7ed85c24c60a913426 Jan 27 12:54:19 crc kubenswrapper[4900]: I0127 12:54:19.366503 4900 scope.go:117] "RemoveContainer" containerID="853b9d9b02d1d4fbab8509ee530a89baa7d96fc03c4e0b0675d4ad924177d3fd" Jan 27 12:54:20 crc kubenswrapper[4900]: I0127 12:54:20.059746 4900 scope.go:117] "RemoveContainer" containerID="ffde071f4e8f2341a93f616a55f75635c6d75edc4c9cec7b14f3d3034c3581fd" Jan 27 12:54:20 crc kubenswrapper[4900]: I0127 12:54:20.094291 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-cjbb2" event={"ID":"c0c2f998-53fe-4850-a061-6f275bb39313","Type":"ContainerStarted","Data":"d5534d761c11edb0725424babdce745732349ab47255ee7ed85c24c60a913426"} Jan 27 12:54:20 crc kubenswrapper[4900]: I0127 12:54:20.100583 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" event={"ID":"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc","Type":"ContainerStarted","Data":"3cf53e8905946af7fd7264c27f6202978992cbf0dcfbc7eac50b1ce426cdfbc5"} Jan 27 12:54:20 crc kubenswrapper[4900]: I0127 12:54:20.100746 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:20 crc kubenswrapper[4900]: I0127 12:54:20.134807 4900 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" podStartSLOduration=9.134785451 podStartE2EDuration="9.134785451s" podCreationTimestamp="2026-01-27 12:54:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:20.124547794 +0000 UTC m=+1687.361576004" watchObservedRunningTime="2026-01-27 12:54:20.134785451 +0000 UTC m=+1687.371813661"
Jan 27 12:54:20 crc kubenswrapper[4900]: I0127 12:54:20.538868 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4bb3c3d-78b6-46c9-b197-04efff895f36" path="/var/lib/kubelet/pods/b4bb3c3d-78b6-46c9-b197-04efff895f36/volumes"
Jan 27 12:54:20 crc kubenswrapper[4900]: I0127 12:54:20.543666 4900 scope.go:117] "RemoveContainer" containerID="c6ba07dc6e560cbde49eed285716318f408ade3993351613835465f47b753adf"
Jan 27 12:54:20 crc kubenswrapper[4900]: I0127 12:54:20.796720 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.118406 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerStarted","Data":"b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662"}
Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.121211 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"73158450-c56d-45ec-a637-d46c435e81a0","Type":"ContainerStarted","Data":"82e5445b660704009749019a5db8dd12bf97a9baa5180f1b1b3675d38629f52b"}
Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.121358 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="73158450-c56d-45ec-a637-d46c435e81a0" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://82e5445b660704009749019a5db8dd12bf97a9baa5180f1b1b3675d38629f52b" gracePeriod=30
Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.148632 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c07718a-743c-4b97-bd07-ff1192928e8f","Type":"ContainerStarted","Data":"8417589e6cb3afd2a44bfc830560b0893a0286e4cc3bd736f93b7220c1797f0c"}
Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.161417 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.715874454 podStartE2EDuration="10.161393815s" podCreationTimestamp="2026-01-27 12:54:11 +0000 UTC" firstStartedPulling="2026-01-27 12:54:13.707944932 +0000 UTC m=+1680.944973142" lastFinishedPulling="2026-01-27 12:54:20.153464293 +0000 UTC m=+1687.390492503" observedRunningTime="2026-01-27 12:54:21.155887885 +0000 UTC m=+1688.392916105" watchObservedRunningTime="2026-01-27 12:54:21.161393815 +0000 UTC m=+1688.398422025"
Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.179569 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"34251519-108d-402e-9848-301fa422c83f","Type":"ContainerStarted","Data":"a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f"}
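
"Killing container with a grace period" with gracePeriod=30 means the runtime delivers SIGTERM first and escalates to SIGKILL only if the container outlives the grace period; a container that dies on the SIGTERM reports exitCode=143 (128+15), as nova-metadata-log does a little further down. The 30 comes from the pod's terminationGracePeriodSeconds; a minimal sketch with the corev1 types, where only the value itself is taken from the log:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        grace := int64(30) // matches gracePeriod=30 in the entries above
        pod := corev1.Pod{
            Spec: corev1.PodSpec{
                TerminationGracePeriodSeconds: &grace,
            },
        }
        // SIGTERM at t=0; SIGKILL at t=grace if the process has not exited.
        fmt.Printf("grace period %ds; exit code 128+15=%d marks death by SIGTERM\n",
            *pod.Spec.TerminationGracePeriodSeconds, 128+15)
    }
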
event={"ID":"34251519-108d-402e-9848-301fa422c83f","Type":"ContainerStarted","Data":"bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90"} Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.185749 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerStarted","Data":"c529e3c72e9887c0a06ebf88322e927f860fe8e56f5f9723dd11d083fe53a9ce"} Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.192492 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.26655961 podStartE2EDuration="10.192466277s" podCreationTimestamp="2026-01-27 12:54:11 +0000 UTC" firstStartedPulling="2026-01-27 12:54:13.181091477 +0000 UTC m=+1680.418119687" lastFinishedPulling="2026-01-27 12:54:20.106998144 +0000 UTC m=+1687.344026354" observedRunningTime="2026-01-27 12:54:21.184903397 +0000 UTC m=+1688.421931617" watchObservedRunningTime="2026-01-27 12:54:21.192466277 +0000 UTC m=+1688.429494487" Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.199156 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-cjbb2" event={"ID":"c0c2f998-53fe-4850-a061-6f275bb39313","Type":"ContainerStarted","Data":"dd17466485a03db1dd63a32a13720b1520909966fe30dc93ac38c95b37286c6d"} Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.211233 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerName="nova-metadata-log" containerID="cri-o://20dd8acb9a403c81b8724c37e6493d41bfa2fe5161dcb476bc2711af27a46021" gracePeriod=30 Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.211428 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerName="nova-metadata-metadata" containerID="cri-o://9aec7db7488768345eb7e9451a0947238ec199e2ac9619425cb105cf10bf67ba" gracePeriod=30 Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.212074 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ca41e27-112f-461c-9556-768eea0cbdf6","Type":"ContainerStarted","Data":"9aec7db7488768345eb7e9451a0947238ec199e2ac9619425cb105cf10bf67ba"} Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.212113 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ca41e27-112f-461c-9556-768eea0cbdf6","Type":"ContainerStarted","Data":"20dd8acb9a403c81b8724c37e6493d41bfa2fe5161dcb476bc2711af27a46021"} Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.214488 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.804806275 podStartE2EDuration="10.214459935s" podCreationTimestamp="2026-01-27 12:54:11 +0000 UTC" firstStartedPulling="2026-01-27 12:54:13.784889064 +0000 UTC m=+1681.021917274" lastFinishedPulling="2026-01-27 12:54:20.194542724 +0000 UTC m=+1687.431570934" observedRunningTime="2026-01-27 12:54:21.207775871 +0000 UTC m=+1688.444804101" watchObservedRunningTime="2026-01-27 12:54:21.214459935 +0000 UTC m=+1688.451488145" Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.269739 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.8961574949999997 podStartE2EDuration="10.269713448s" 
podCreationTimestamp="2026-01-27 12:54:11 +0000 UTC" firstStartedPulling="2026-01-27 12:54:13.824889265 +0000 UTC m=+1681.061917475" lastFinishedPulling="2026-01-27 12:54:20.198445218 +0000 UTC m=+1687.435473428" observedRunningTime="2026-01-27 12:54:21.267867364 +0000 UTC m=+1688.504895574" watchObservedRunningTime="2026-01-27 12:54:21.269713448 +0000 UTC m=+1688.506741658" Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.285745 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-cjbb2" podStartSLOduration=7.285713882 podStartE2EDuration="7.285713882s" podCreationTimestamp="2026-01-27 12:54:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:21.238512763 +0000 UTC m=+1688.475540993" watchObservedRunningTime="2026-01-27 12:54:21.285713882 +0000 UTC m=+1688.522742102" Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.772667 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.772739 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.926936 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.927024 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 12:54:21 crc kubenswrapper[4900]: I0127 12:54:21.992205 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.009395 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.009438 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.062760 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.234969 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerStarted","Data":"a74c693832fccbd0c134ca600dcc21213cf57e2ea4e26eea0ea9072e40e0874a"} Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.250566 4900 generic.go:334] "Generic (PLEG): container finished" podID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerID="20dd8acb9a403c81b8724c37e6493d41bfa2fe5161dcb476bc2711af27a46021" exitCode=143 Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.251950 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ca41e27-112f-461c-9556-768eea0cbdf6","Type":"ContainerDied","Data":"20dd8acb9a403c81b8724c37e6493d41bfa2fe5161dcb476bc2711af27a46021"} Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.400892 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.856496 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-api" 
probeResult="failure" output="Get \"http://10.217.0.245:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 12:54:22 crc kubenswrapper[4900]: I0127 12:54:22.857251 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.245:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 12:54:23 crc kubenswrapper[4900]: I0127 12:54:23.270828 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerStarted","Data":"4b20bd641d9bcd1c9494f4e55c3e92935ab4cfb59d02fc7b56f79dfbc53f1a4b"} Jan 27 12:54:25 crc kubenswrapper[4900]: I0127 12:54:25.689806 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerStarted","Data":"d8a2e50f8fd6d787bc58416c0f2aa1d34e25aaa6898c22ac1e748213e7ff8d6f"} Jan 27 12:54:25 crc kubenswrapper[4900]: I0127 12:54:25.704405 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerStarted","Data":"9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24"} Jan 27 12:54:25 crc kubenswrapper[4900]: I0127 12:54:25.704651 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-api" containerID="cri-o://9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502" gracePeriod=30 Jan 27 12:54:25 crc kubenswrapper[4900]: I0127 12:54:25.705577 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-listener" containerID="cri-o://9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24" gracePeriod=30 Jan 27 12:54:25 crc kubenswrapper[4900]: I0127 12:54:25.705651 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-notifier" containerID="cri-o://b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662" gracePeriod=30 Jan 27 12:54:25 crc kubenswrapper[4900]: I0127 12:54:25.705705 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-evaluator" containerID="cri-o://51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66" gracePeriod=30 Jan 27 12:54:26 crc kubenswrapper[4900]: E0127 12:54:26.210096 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod182248e0_13c8_4d04_bfd2_f15598f4820f.slice/crio-9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod182248e0_13c8_4d04_bfd2_f15598f4820f.slice/crio-conmon-9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502.scope\": RecentStats: unable to find data in memory cache]" Jan 27 12:54:26 crc kubenswrapper[4900]: I0127 12:54:26.533483 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/aodh-0" podStartSLOduration=3.889537681 podStartE2EDuration="20.533460553s" podCreationTimestamp="2026-01-27 12:54:06 +0000 UTC" firstStartedPulling="2026-01-27 12:54:07.572779786 +0000 UTC m=+1674.809807986" lastFinishedPulling="2026-01-27 12:54:24.216702648 +0000 UTC m=+1691.453730858" observedRunningTime="2026-01-27 12:54:25.7595686 +0000 UTC m=+1692.996596810" watchObservedRunningTime="2026-01-27 12:54:26.533460553 +0000 UTC m=+1693.770488763" Jan 27 12:54:26 crc kubenswrapper[4900]: I0127 12:54:26.725828 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerStarted","Data":"c052ead14ba9fc79c26767e2aba1acaf5a6d0e3e406e31242145e429fe21e451"} Jan 27 12:54:26 crc kubenswrapper[4900]: I0127 12:54:26.727730 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 12:54:26 crc kubenswrapper[4900]: I0127 12:54:26.731454 4900 generic.go:334] "Generic (PLEG): container finished" podID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerID="51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66" exitCode=0 Jan 27 12:54:26 crc kubenswrapper[4900]: I0127 12:54:26.731513 4900 generic.go:334] "Generic (PLEG): container finished" podID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerID="9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502" exitCode=0 Jan 27 12:54:26 crc kubenswrapper[4900]: I0127 12:54:26.731563 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerDied","Data":"51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66"} Jan 27 12:54:26 crc kubenswrapper[4900]: I0127 12:54:26.731615 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerDied","Data":"9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502"} Jan 27 12:54:26 crc kubenswrapper[4900]: I0127 12:54:26.755785 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.215946318 podStartE2EDuration="8.755757053s" podCreationTimestamp="2026-01-27 12:54:18 +0000 UTC" firstStartedPulling="2026-01-27 12:54:20.811779292 +0000 UTC m=+1688.048807502" lastFinishedPulling="2026-01-27 12:54:26.351590017 +0000 UTC m=+1693.588618237" observedRunningTime="2026-01-27 12:54:26.749341677 +0000 UTC m=+1693.986369897" watchObservedRunningTime="2026-01-27 12:54:26.755757053 +0000 UTC m=+1693.992785263" Jan 27 12:54:27 crc kubenswrapper[4900]: I0127 12:54:27.323986 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:54:27 crc kubenswrapper[4900]: I0127 12:54:27.448976 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-nlfhl"] Jan 27 12:54:27 crc kubenswrapper[4900]: I0127 12:54:27.449260 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" podUID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" containerName="dnsmasq-dns" containerID="cri-o://8c1d9e3045ec4e5afd8ab3888e93dbc673e13d1a7754e872483add308efc6c44" gracePeriod=10 Jan 27 12:54:27 crc kubenswrapper[4900]: I0127 12:54:27.483717 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:54:27 crc kubenswrapper[4900]: 
E0127 12:54:27.483984 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 12:54:27 crc kubenswrapper[4900]: I0127 12:54:27.771786 4900 generic.go:334] "Generic (PLEG): container finished" podID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" containerID="8c1d9e3045ec4e5afd8ab3888e93dbc673e13d1a7754e872483add308efc6c44" exitCode=0
Jan 27 12:54:27 crc kubenswrapper[4900]: I0127 12:54:27.771856 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" event={"ID":"e3964262-2f6d-40bd-8c37-6cf8ccf1312a","Type":"ContainerDied","Data":"8c1d9e3045ec4e5afd8ab3888e93dbc673e13d1a7754e872483add308efc6c44"}
Jan 27 12:54:27 crc kubenswrapper[4900]: I0127 12:54:27.775736 4900 generic.go:334] "Generic (PLEG): container finished" podID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerID="b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662" exitCode=0
Jan 27 12:54:27 crc kubenswrapper[4900]: I0127 12:54:27.776869 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerDied","Data":"b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662"}
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.530204 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl"
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.667463 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vl2kt\" (UniqueName: \"kubernetes.io/projected/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-kube-api-access-vl2kt\") pod \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") "
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.667598 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-config\") pod \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") "
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.667720 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-nb\") pod \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") "
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.667752 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-swift-storage-0\") pod \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") "
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.667913 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-sb\") pod \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") "
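
The UnmountVolume entries above (and the TearDown / "Volume detached" entries that follow) are the volume manager unwinding the deleted dnsmasq-dns-f6bc4c6c9-nlfhl pod: each volume is unmounted, torn down by its plugin, and marked detached, after which kubelet_volumes.go can report "Cleaned up orphaned pod volumes dir" further down. Assuming the standard kubelet directory layout, the ConfigMap volumes above map to paths like the ones this sketch prints:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        podUID := "e3964262-2f6d-40bd-8c37-6cf8ccf1312a" // dnsmasq-dns-f6bc4c6c9-nlfhl, from the log
        // ConfigMap-backed volumes live under the kubernetes.io~configmap plugin dir.
        for _, vol := range []string{"config", "dns-svc", "ovsdbserver-nb", "ovsdbserver-sb", "dns-swift-storage-0"} {
            fmt.Println(filepath.Join("/var/lib/kubelet/pods", podUID,
                "volumes", "kubernetes.io~configmap", vol))
        }
    }
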
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.668132 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-svc\") pod \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\" (UID: \"e3964262-2f6d-40bd-8c37-6cf8ccf1312a\") "
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.714920 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-kube-api-access-vl2kt" (OuterVolumeSpecName: "kube-api-access-vl2kt") pod "e3964262-2f6d-40bd-8c37-6cf8ccf1312a" (UID: "e3964262-2f6d-40bd-8c37-6cf8ccf1312a"). InnerVolumeSpecName "kube-api-access-vl2kt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.757155 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-config" (OuterVolumeSpecName: "config") pod "e3964262-2f6d-40bd-8c37-6cf8ccf1312a" (UID: "e3964262-2f6d-40bd-8c37-6cf8ccf1312a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.762253 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e3964262-2f6d-40bd-8c37-6cf8ccf1312a" (UID: "e3964262-2f6d-40bd-8c37-6cf8ccf1312a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.772794 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.772848 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vl2kt\" (UniqueName: \"kubernetes.io/projected/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-kube-api-access-vl2kt\") on node \"crc\" DevicePath \"\""
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.772862 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.798890 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e3964262-2f6d-40bd-8c37-6cf8ccf1312a" (UID: "e3964262-2f6d-40bd-8c37-6cf8ccf1312a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.803820 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e3964262-2f6d-40bd-8c37-6cf8ccf1312a" (UID: "e3964262-2f6d-40bd-8c37-6cf8ccf1312a"). InnerVolumeSpecName "dns-swift-storage-0".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.803876 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e3964262-2f6d-40bd-8c37-6cf8ccf1312a" (UID: "e3964262-2f6d-40bd-8c37-6cf8ccf1312a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.837773 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.837653 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-nlfhl" event={"ID":"e3964262-2f6d-40bd-8c37-6cf8ccf1312a","Type":"ContainerDied","Data":"c0d75bf1733a77aa847357bf3ab5a213fa48395a4f2086192f3a8da4582811a1"} Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.839119 4900 scope.go:117] "RemoveContainer" containerID="8c1d9e3045ec4e5afd8ab3888e93dbc673e13d1a7754e872483add308efc6c44" Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.843544 4900 generic.go:334] "Generic (PLEG): container finished" podID="eff943df-8066-483c-a0c7-0fb3f7346380" containerID="a6f20a452a2479b3b0a8ab5d857f98014bff4d463aea096cf23ede2b3b90a24f" exitCode=0 Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.843642 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tp8r6" event={"ID":"eff943df-8066-483c-a0c7-0fb3f7346380","Type":"ContainerDied","Data":"a6f20a452a2479b3b0a8ab5d857f98014bff4d463aea096cf23ede2b3b90a24f"} Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.876482 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.876520 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:28 crc kubenswrapper[4900]: I0127 12:54:28.876532 4900 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3964262-2f6d-40bd-8c37-6cf8ccf1312a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:29 crc kubenswrapper[4900]: I0127 12:54:29.012996 4900 scope.go:117] "RemoveContainer" containerID="e4b61be158c5837607b67cb0c510d24fb7c5d0f4e84cf1ab162815edcebc8546" Jan 27 12:54:29 crc kubenswrapper[4900]: I0127 12:54:29.028033 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-nlfhl"] Jan 27 12:54:29 crc kubenswrapper[4900]: I0127 12:54:29.041168 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-nlfhl"] Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.499158 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" path="/var/lib/kubelet/pods/e3964262-2f6d-40bd-8c37-6cf8ccf1312a/volumes" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.501402 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.519076 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-scripts\") pod \"eff943df-8066-483c-a0c7-0fb3f7346380\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.519508 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-combined-ca-bundle\") pod \"eff943df-8066-483c-a0c7-0fb3f7346380\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.519660 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-885kl\" (UniqueName: \"kubernetes.io/projected/eff943df-8066-483c-a0c7-0fb3f7346380-kube-api-access-885kl\") pod \"eff943df-8066-483c-a0c7-0fb3f7346380\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.519787 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-config-data\") pod \"eff943df-8066-483c-a0c7-0fb3f7346380\" (UID: \"eff943df-8066-483c-a0c7-0fb3f7346380\") " Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.544962 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-scripts" (OuterVolumeSpecName: "scripts") pod "eff943df-8066-483c-a0c7-0fb3f7346380" (UID: "eff943df-8066-483c-a0c7-0fb3f7346380"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.549290 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eff943df-8066-483c-a0c7-0fb3f7346380-kube-api-access-885kl" (OuterVolumeSpecName: "kube-api-access-885kl") pod "eff943df-8066-483c-a0c7-0fb3f7346380" (UID: "eff943df-8066-483c-a0c7-0fb3f7346380"). InnerVolumeSpecName "kube-api-access-885kl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.608260 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eff943df-8066-483c-a0c7-0fb3f7346380" (UID: "eff943df-8066-483c-a0c7-0fb3f7346380"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.612404 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-config-data" (OuterVolumeSpecName: "config-data") pod "eff943df-8066-483c-a0c7-0fb3f7346380" (UID: "eff943df-8066-483c-a0c7-0fb3f7346380"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.624818 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.624873 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-885kl\" (UniqueName: \"kubernetes.io/projected/eff943df-8066-483c-a0c7-0fb3f7346380-kube-api-access-885kl\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.624889 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.624899 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eff943df-8066-483c-a0c7-0fb3f7346380-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.877782 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-tp8r6" event={"ID":"eff943df-8066-483c-a0c7-0fb3f7346380","Type":"ContainerDied","Data":"3dd40f5c799a1cd9efd1e857050671f2e604dfb6e06ed89e6390aa5e70c45b9c"} Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.878293 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3dd40f5c799a1cd9efd1e857050671f2e604dfb6e06ed89e6390aa5e70c45b9c" Jan 27 12:54:30 crc kubenswrapper[4900]: I0127 12:54:30.877833 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-tp8r6" Jan 27 12:54:31 crc kubenswrapper[4900]: I0127 12:54:31.059615 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:31 crc kubenswrapper[4900]: I0127 12:54:31.059981 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-log" containerID="cri-o://bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90" gracePeriod=30 Jan 27 12:54:31 crc kubenswrapper[4900]: I0127 12:54:31.060223 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-api" containerID="cri-o://a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f" gracePeriod=30 Jan 27 12:54:31 crc kubenswrapper[4900]: I0127 12:54:31.093552 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:31 crc kubenswrapper[4900]: I0127 12:54:31.093837 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="7c07718a-743c-4b97-bd07-ff1192928e8f" containerName="nova-scheduler-scheduler" containerID="cri-o://8417589e6cb3afd2a44bfc830560b0893a0286e4cc3bd736f93b7220c1797f0c" gracePeriod=30 Jan 27 12:54:31 crc kubenswrapper[4900]: I0127 12:54:31.896121 4900 generic.go:334] "Generic (PLEG): container finished" podID="34251519-108d-402e-9848-301fa422c83f" containerID="bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90" exitCode=143 Jan 27 12:54:31 crc kubenswrapper[4900]: I0127 12:54:31.896205 4900 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/nova-api-0" event={"ID":"34251519-108d-402e-9848-301fa422c83f","Type":"ContainerDied","Data":"bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90"} Jan 27 12:54:32 crc kubenswrapper[4900]: E0127 12:54:32.014133 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8417589e6cb3afd2a44bfc830560b0893a0286e4cc3bd736f93b7220c1797f0c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 12:54:32 crc kubenswrapper[4900]: E0127 12:54:32.015945 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8417589e6cb3afd2a44bfc830560b0893a0286e4cc3bd736f93b7220c1797f0c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 12:54:32 crc kubenswrapper[4900]: E0127 12:54:32.017503 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8417589e6cb3afd2a44bfc830560b0893a0286e4cc3bd736f93b7220c1797f0c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 12:54:32 crc kubenswrapper[4900]: E0127 12:54:32.017587 4900 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="7c07718a-743c-4b97-bd07-ff1192928e8f" containerName="nova-scheduler-scheduler" Jan 27 12:54:32 crc kubenswrapper[4900]: I0127 12:54:32.939629 4900 generic.go:334] "Generic (PLEG): container finished" podID="c0c2f998-53fe-4850-a061-6f275bb39313" containerID="dd17466485a03db1dd63a32a13720b1520909966fe30dc93ac38c95b37286c6d" exitCode=0 Jan 27 12:54:32 crc kubenswrapper[4900]: I0127 12:54:32.939937 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-cjbb2" event={"ID":"c0c2f998-53fe-4850-a061-6f275bb39313","Type":"ContainerDied","Data":"dd17466485a03db1dd63a32a13720b1520909966fe30dc93ac38c95b37286c6d"} Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.546956 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-cjbb2" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.697130 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28vzm\" (UniqueName: \"kubernetes.io/projected/c0c2f998-53fe-4850-a061-6f275bb39313-kube-api-access-28vzm\") pod \"c0c2f998-53fe-4850-a061-6f275bb39313\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.697386 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-combined-ca-bundle\") pod \"c0c2f998-53fe-4850-a061-6f275bb39313\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.697568 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-scripts\") pod \"c0c2f998-53fe-4850-a061-6f275bb39313\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.697620 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-config-data\") pod \"c0c2f998-53fe-4850-a061-6f275bb39313\" (UID: \"c0c2f998-53fe-4850-a061-6f275bb39313\") " Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.706840 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0c2f998-53fe-4850-a061-6f275bb39313-kube-api-access-28vzm" (OuterVolumeSpecName: "kube-api-access-28vzm") pod "c0c2f998-53fe-4850-a061-6f275bb39313" (UID: "c0c2f998-53fe-4850-a061-6f275bb39313"). InnerVolumeSpecName "kube-api-access-28vzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.708290 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-scripts" (OuterVolumeSpecName: "scripts") pod "c0c2f998-53fe-4850-a061-6f275bb39313" (UID: "c0c2f998-53fe-4850-a061-6f275bb39313"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.756193 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0c2f998-53fe-4850-a061-6f275bb39313" (UID: "c0c2f998-53fe-4850-a061-6f275bb39313"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.779979 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-config-data" (OuterVolumeSpecName: "config-data") pod "c0c2f998-53fe-4850-a061-6f275bb39313" (UID: "c0c2f998-53fe-4850-a061-6f275bb39313"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.808127 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28vzm\" (UniqueName: \"kubernetes.io/projected/c0c2f998-53fe-4850-a061-6f275bb39313-kube-api-access-28vzm\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.808170 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.808183 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.808195 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c2f998-53fe-4850-a061-6f275bb39313-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.913478 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.966284 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-cjbb2" event={"ID":"c0c2f998-53fe-4850-a061-6f275bb39313","Type":"ContainerDied","Data":"d5534d761c11edb0725424babdce745732349ab47255ee7ed85c24c60a913426"} Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.966343 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5534d761c11edb0725424babdce745732349ab47255ee7ed85c24c60a913426" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.966347 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-cjbb2" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.981538 4900 generic.go:334] "Generic (PLEG): container finished" podID="34251519-108d-402e-9848-301fa422c83f" containerID="a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f" exitCode=0 Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.981640 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.981657 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"34251519-108d-402e-9848-301fa422c83f","Type":"ContainerDied","Data":"a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f"} Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.981851 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"34251519-108d-402e-9848-301fa422c83f","Type":"ContainerDied","Data":"687846f04bc576dadd1e347e8989a8642fd4b388aec9c58c6c2d3cf44d8529f5"} Jan 27 12:54:34 crc kubenswrapper[4900]: I0127 12:54:34.981877 4900 scope.go:117] "RemoveContainer" containerID="a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.021252 4900 scope.go:117] "RemoveContainer" containerID="bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.051762 4900 scope.go:117] "RemoveContainer" containerID="a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f" Jan 27 12:54:35 crc kubenswrapper[4900]: E0127 12:54:35.055286 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f\": container with ID starting with a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f not found: ID does not exist" containerID="a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.056137 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f"} err="failed to get container status \"a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f\": rpc error: code = NotFound desc = could not find container \"a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f\": container with ID starting with a466edcf9a944ad25581df48045bee2a52f5695613115bf6e843f0e62c75dd8f not found: ID does not exist" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.056175 4900 scope.go:117] "RemoveContainer" containerID="bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90" Jan 27 12:54:35 crc kubenswrapper[4900]: E0127 12:54:35.058954 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90\": container with ID starting with bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90 not found: ID does not exist" containerID="bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.059121 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90"} err="failed to get container status \"bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90\": rpc error: code = NotFound desc = could not find container \"bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90\": container with ID starting with bcb754f6b8528393654c2b0115036cdfe50c9365ab7a6937430e308acef3ee90 not found: ID does not exist" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.092325 4900 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 27 12:54:35 crc kubenswrapper[4900]: E0127 12:54:35.093095 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-api" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093120 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-api" Jan 27 12:54:35 crc kubenswrapper[4900]: E0127 12:54:35.093153 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c2f998-53fe-4850-a061-6f275bb39313" containerName="nova-cell1-conductor-db-sync" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093163 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c2f998-53fe-4850-a061-6f275bb39313" containerName="nova-cell1-conductor-db-sync" Jan 27 12:54:35 crc kubenswrapper[4900]: E0127 12:54:35.093202 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" containerName="init" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093209 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" containerName="init" Jan 27 12:54:35 crc kubenswrapper[4900]: E0127 12:54:35.093229 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-log" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093236 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-log" Jan 27 12:54:35 crc kubenswrapper[4900]: E0127 12:54:35.093256 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" containerName="dnsmasq-dns" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093267 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" containerName="dnsmasq-dns" Jan 27 12:54:35 crc kubenswrapper[4900]: E0127 12:54:35.093286 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff943df-8066-483c-a0c7-0fb3f7346380" containerName="nova-manage" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093295 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff943df-8066-483c-a0c7-0fb3f7346380" containerName="nova-manage" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093568 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="eff943df-8066-483c-a0c7-0fb3f7346380" containerName="nova-manage" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093605 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0c2f998-53fe-4850-a061-6f275bb39313" containerName="nova-cell1-conductor-db-sync" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093622 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-api" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093636 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3964262-2f6d-40bd-8c37-6cf8ccf1312a" containerName="dnsmasq-dns" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.093653 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="34251519-108d-402e-9848-301fa422c83f" containerName="nova-api-log" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.094803 4900 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.099936 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.117038 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34251519-108d-402e-9848-301fa422c83f-logs\") pod \"34251519-108d-402e-9848-301fa422c83f\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.117402 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-config-data\") pod \"34251519-108d-402e-9848-301fa422c83f\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.117736 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-combined-ca-bundle\") pod \"34251519-108d-402e-9848-301fa422c83f\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.117867 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsmfl\" (UniqueName: \"kubernetes.io/projected/34251519-108d-402e-9848-301fa422c83f-kube-api-access-qsmfl\") pod \"34251519-108d-402e-9848-301fa422c83f\" (UID: \"34251519-108d-402e-9848-301fa422c83f\") " Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.118073 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34251519-108d-402e-9848-301fa422c83f-logs" (OuterVolumeSpecName: "logs") pod "34251519-108d-402e-9848-301fa422c83f" (UID: "34251519-108d-402e-9848-301fa422c83f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.118988 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34251519-108d-402e-9848-301fa422c83f-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.128589 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34251519-108d-402e-9848-301fa422c83f-kube-api-access-qsmfl" (OuterVolumeSpecName: "kube-api-access-qsmfl") pod "34251519-108d-402e-9848-301fa422c83f" (UID: "34251519-108d-402e-9848-301fa422c83f"). InnerVolumeSpecName "kube-api-access-qsmfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.143207 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.167344 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34251519-108d-402e-9848-301fa422c83f" (UID: "34251519-108d-402e-9848-301fa422c83f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.176160 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-config-data" (OuterVolumeSpecName: "config-data") pod "34251519-108d-402e-9848-301fa422c83f" (UID: "34251519-108d-402e-9848-301fa422c83f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.221559 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e31cf8e-3c3b-4afa-a114-99ee840e8234-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.221725 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e31cf8e-3c3b-4afa-a114-99ee840e8234-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.221851 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7h87\" (UniqueName: \"kubernetes.io/projected/8e31cf8e-3c3b-4afa-a114-99ee840e8234-kube-api-access-n7h87\") pod \"nova-cell1-conductor-0\" (UID: \"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.221997 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.222071 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34251519-108d-402e-9848-301fa422c83f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.222091 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsmfl\" (UniqueName: \"kubernetes.io/projected/34251519-108d-402e-9848-301fa422c83f-kube-api-access-qsmfl\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.325917 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e31cf8e-3c3b-4afa-a114-99ee840e8234-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.326217 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7h87\" (UniqueName: \"kubernetes.io/projected/8e31cf8e-3c3b-4afa-a114-99ee840e8234-kube-api-access-n7h87\") pod \"nova-cell1-conductor-0\" (UID: \"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.326338 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e31cf8e-3c3b-4afa-a114-99ee840e8234-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: 
\"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.333912 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e31cf8e-3c3b-4afa-a114-99ee840e8234-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.336324 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e31cf8e-3c3b-4afa-a114-99ee840e8234-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.343751 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.359929 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.360878 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7h87\" (UniqueName: \"kubernetes.io/projected/8e31cf8e-3c3b-4afa-a114-99ee840e8234-kube-api-access-n7h87\") pod \"nova-cell1-conductor-0\" (UID: \"8e31cf8e-3c3b-4afa-a114-99ee840e8234\") " pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.374095 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.376546 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.379380 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.387493 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.423193 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.535632 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.535948 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db60a14a-4df5-4346-b556-b937aa93d253-logs\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.535988 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-config-data\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.536007 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqqxf\" (UniqueName: \"kubernetes.io/projected/db60a14a-4df5-4346-b556-b937aa93d253-kube-api-access-nqqxf\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.639157 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.639271 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db60a14a-4df5-4346-b556-b937aa93d253-logs\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.639327 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-config-data\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.639352 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqqxf\" (UniqueName: \"kubernetes.io/projected/db60a14a-4df5-4346-b556-b937aa93d253-kube-api-access-nqqxf\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.640554 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db60a14a-4df5-4346-b556-b937aa93d253-logs\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.662011 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-config-data\") pod \"nova-api-0\" (UID: 
\"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.663514 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.665596 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqqxf\" (UniqueName: \"kubernetes.io/projected/db60a14a-4df5-4346-b556-b937aa93d253-kube-api-access-nqqxf\") pod \"nova-api-0\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.941640 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:54:35 crc kubenswrapper[4900]: I0127 12:54:35.948415 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 27 12:54:35 crc kubenswrapper[4900]: W0127 12:54:35.948940 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e31cf8e_3c3b_4afa_a114_99ee840e8234.slice/crio-65876a973649a6c6def36448ae2d7ca004484e307787d15b781698596287db5b WatchSource:0}: Error finding container 65876a973649a6c6def36448ae2d7ca004484e307787d15b781698596287db5b: Status 404 returned error can't find the container with id 65876a973649a6c6def36448ae2d7ca004484e307787d15b781698596287db5b Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.003048 4900 generic.go:334] "Generic (PLEG): container finished" podID="7c07718a-743c-4b97-bd07-ff1192928e8f" containerID="8417589e6cb3afd2a44bfc830560b0893a0286e4cc3bd736f93b7220c1797f0c" exitCode=0 Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.003136 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c07718a-743c-4b97-bd07-ff1192928e8f","Type":"ContainerDied","Data":"8417589e6cb3afd2a44bfc830560b0893a0286e4cc3bd736f93b7220c1797f0c"} Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.005649 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"8e31cf8e-3c3b-4afa-a114-99ee840e8234","Type":"ContainerStarted","Data":"65876a973649a6c6def36448ae2d7ca004484e307787d15b781698596287db5b"} Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.043904 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.151204 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-combined-ca-bundle\") pod \"7c07718a-743c-4b97-bd07-ff1192928e8f\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.151539 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq7t6\" (UniqueName: \"kubernetes.io/projected/7c07718a-743c-4b97-bd07-ff1192928e8f-kube-api-access-wq7t6\") pod \"7c07718a-743c-4b97-bd07-ff1192928e8f\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.151661 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-config-data\") pod \"7c07718a-743c-4b97-bd07-ff1192928e8f\" (UID: \"7c07718a-743c-4b97-bd07-ff1192928e8f\") " Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.170804 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c07718a-743c-4b97-bd07-ff1192928e8f-kube-api-access-wq7t6" (OuterVolumeSpecName: "kube-api-access-wq7t6") pod "7c07718a-743c-4b97-bd07-ff1192928e8f" (UID: "7c07718a-743c-4b97-bd07-ff1192928e8f"). InnerVolumeSpecName "kube-api-access-wq7t6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.245504 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c07718a-743c-4b97-bd07-ff1192928e8f" (UID: "7c07718a-743c-4b97-bd07-ff1192928e8f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.256530 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-config-data" (OuterVolumeSpecName: "config-data") pod "7c07718a-743c-4b97-bd07-ff1192928e8f" (UID: "7c07718a-743c-4b97-bd07-ff1192928e8f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.278872 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.279259 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wq7t6\" (UniqueName: \"kubernetes.io/projected/7c07718a-743c-4b97-bd07-ff1192928e8f-kube-api-access-wq7t6\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.279379 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c07718a-743c-4b97-bd07-ff1192928e8f-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.480753 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:54:36 crc kubenswrapper[4900]: I0127 12:54:36.513109 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34251519-108d-402e-9848-301fa422c83f" path="/var/lib/kubelet/pods/34251519-108d-402e-9848-301fa422c83f/volumes" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.020311 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c07718a-743c-4b97-bd07-ff1192928e8f","Type":"ContainerDied","Data":"648763f2746f5423f00c6ff2c552733604ace1b076c6a1c477988eb16718115f"} Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.020350 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.020367 4900 scope.go:117] "RemoveContainer" containerID="8417589e6cb3afd2a44bfc830560b0893a0286e4cc3bd736f93b7220c1797f0c" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.022135 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"db60a14a-4df5-4346-b556-b937aa93d253","Type":"ContainerStarted","Data":"2796075f15466ccfd808bed5d62d2a7a7dfeb4f1fe0b6bf24ca35bb68b9c67da"} Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.026221 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"8e31cf8e-3c3b-4afa-a114-99ee840e8234","Type":"ContainerStarted","Data":"23ded7901c40e9030d1a08ed477663677b83db567a61e126966e101fcd697bd6"} Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.026390 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.069970 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.101229 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.125417 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.12539236 podStartE2EDuration="2.12539236s" podCreationTimestamp="2026-01-27 12:54:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:37.080307522 +0000 UTC m=+1704.317335732" watchObservedRunningTime="2026-01-27 12:54:37.12539236 +0000 UTC 
m=+1704.362420570" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.170815 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:37 crc kubenswrapper[4900]: E0127 12:54:37.171928 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c07718a-743c-4b97-bd07-ff1192928e8f" containerName="nova-scheduler-scheduler" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.175542 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c07718a-743c-4b97-bd07-ff1192928e8f" containerName="nova-scheduler-scheduler" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.176175 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c07718a-743c-4b97-bd07-ff1192928e8f" containerName="nova-scheduler-scheduler" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.178997 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.186571 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.188659 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x844\" (UniqueName: \"kubernetes.io/projected/385c54a0-5afc-4b5e-8357-567088537989-kube-api-access-4x844\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.193038 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.194931 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-config-data\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.198212 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.300073 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.300393 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-config-data\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.300596 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x844\" (UniqueName: \"kubernetes.io/projected/385c54a0-5afc-4b5e-8357-567088537989-kube-api-access-4x844\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " 
pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.306961 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.306961 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-config-data\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.321233 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x844\" (UniqueName: \"kubernetes.io/projected/385c54a0-5afc-4b5e-8357-567088537989-kube-api-access-4x844\") pod \"nova-scheduler-0\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") " pod="openstack/nova-scheduler-0" Jan 27 12:54:37 crc kubenswrapper[4900]: I0127 12:54:37.518399 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:54:38 crc kubenswrapper[4900]: I0127 12:54:38.059438 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:54:38 crc kubenswrapper[4900]: I0127 12:54:38.059963 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"db60a14a-4df5-4346-b556-b937aa93d253","Type":"ContainerStarted","Data":"570bc83dfa93ce18b751df1767c0525333d129f59da553f5bdc530e0817d0912"} Jan 27 12:54:38 crc kubenswrapper[4900]: I0127 12:54:38.059997 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"db60a14a-4df5-4346-b556-b937aa93d253","Type":"ContainerStarted","Data":"dcf6a9f92ebd78897491be3f732f93592e6f3c92233ab7514aca5ff7f72deffc"} Jan 27 12:54:38 crc kubenswrapper[4900]: I0127 12:54:38.103292 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.103252751 podStartE2EDuration="3.103252751s" podCreationTimestamp="2026-01-27 12:54:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:38.091044607 +0000 UTC m=+1705.328072837" watchObservedRunningTime="2026-01-27 12:54:38.103252751 +0000 UTC m=+1705.340280961" Jan 27 12:54:38 crc kubenswrapper[4900]: I0127 12:54:38.508866 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c07718a-743c-4b97-bd07-ff1192928e8f" path="/var/lib/kubelet/pods/7c07718a-743c-4b97-bd07-ff1192928e8f/volumes" Jan 27 12:54:39 crc kubenswrapper[4900]: I0127 12:54:39.074812 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"385c54a0-5afc-4b5e-8357-567088537989","Type":"ContainerStarted","Data":"d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3"} Jan 27 12:54:39 crc kubenswrapper[4900]: I0127 12:54:39.075309 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"385c54a0-5afc-4b5e-8357-567088537989","Type":"ContainerStarted","Data":"dbd297cc07f174ecb2343a0c7d99c1d65d601df8d3970e76116912bbbe1a91b3"} Jan 27 12:54:39 crc kubenswrapper[4900]: I0127 12:54:39.101184 4900 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.101164802 podStartE2EDuration="2.101164802s" podCreationTimestamp="2026-01-27 12:54:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:39.092340246 +0000 UTC m=+1706.329368476" watchObservedRunningTime="2026-01-27 12:54:39.101164802 +0000 UTC m=+1706.338193012" Jan 27 12:54:41 crc kubenswrapper[4900]: I0127 12:54:41.526747 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:54:41 crc kubenswrapper[4900]: E0127 12:54:41.527619 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:54:42 crc kubenswrapper[4900]: I0127 12:54:42.519416 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 27 12:54:45 crc kubenswrapper[4900]: I0127 12:54:45.466724 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 27 12:54:45 crc kubenswrapper[4900]: I0127 12:54:45.942986 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 12:54:45 crc kubenswrapper[4900]: I0127 12:54:45.943076 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 12:54:47 crc kubenswrapper[4900]: I0127 12:54:47.035299 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.253:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 12:54:47 crc kubenswrapper[4900]: I0127 12:54:47.035391 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.253:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 12:54:47 crc kubenswrapper[4900]: I0127 12:54:47.519100 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 27 12:54:47 crc kubenswrapper[4900]: I0127 12:54:47.557881 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 27 12:54:48 crc kubenswrapper[4900]: I0127 12:54:48.470360 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 27 12:54:49 crc kubenswrapper[4900]: I0127 12:54:49.010728 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.472605 4900 generic.go:334] "Generic (PLEG): container finished" podID="73158450-c56d-45ec-a637-d46c435e81a0" containerID="82e5445b660704009749019a5db8dd12bf97a9baa5180f1b1b3675d38629f52b" exitCode=137 Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.473387 4900 kubelet.go:2453] "SyncLoop (PLEG): 
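The exit codes recorded in these PLEG "container finished" entries follow the usual shell convention: values above 128 are 128 plus the fatal signal number, so the exitCode=143 seen earlier is SIGTERM (the container stopped within its 30 s grace period) and the exitCode=137 entries here are SIGKILL (the runtime force-killed the container, typically after the grace period expired or a liveness failure). A small self-contained Python decoder, included purely as a reading aid for these values:

```python
"""Decode the container exitCode values observed in this log
(0, 143, 137) using the 128 + signal-number convention."""
import signal

def describe_exit_code(code: int) -> str:
    if code == 0:
        return "clean exit"
    if code > 128:
        try:
            return f"terminated by {signal.Signals(code - 128).name}"
        except ValueError:
            return f"terminated by signal {code - 128}"
    return f"application error (exit {code})"

for code in (0, 143, 137):  # values observed above
    print(code, "->", describe_exit_code(code))
    # 0   -> clean exit
    # 143 -> terminated by SIGTERM
    # 137 -> terminated by SIGKILL
```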
event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"73158450-c56d-45ec-a637-d46c435e81a0","Type":"ContainerDied","Data":"82e5445b660704009749019a5db8dd12bf97a9baa5180f1b1b3675d38629f52b"} Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.492253 4900 generic.go:334] "Generic (PLEG): container finished" podID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerID="9aec7db7488768345eb7e9451a0947238ec199e2ac9619425cb105cf10bf67ba" exitCode=137 Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.492322 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ca41e27-112f-461c-9556-768eea0cbdf6","Type":"ContainerDied","Data":"9aec7db7488768345eb7e9451a0947238ec199e2ac9619425cb105cf10bf67ba"} Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.785909 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.947550 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.950939 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-combined-ca-bundle\") pod \"73158450-c56d-45ec-a637-d46c435e81a0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.951020 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xk2t\" (UniqueName: \"kubernetes.io/projected/73158450-c56d-45ec-a637-d46c435e81a0-kube-api-access-2xk2t\") pod \"73158450-c56d-45ec-a637-d46c435e81a0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.951160 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-config-data\") pod \"73158450-c56d-45ec-a637-d46c435e81a0\" (UID: \"73158450-c56d-45ec-a637-d46c435e81a0\") " Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.957004 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73158450-c56d-45ec-a637-d46c435e81a0-kube-api-access-2xk2t" (OuterVolumeSpecName: "kube-api-access-2xk2t") pod "73158450-c56d-45ec-a637-d46c435e81a0" (UID: "73158450-c56d-45ec-a637-d46c435e81a0"). InnerVolumeSpecName "kube-api-access-2xk2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:51 crc kubenswrapper[4900]: I0127 12:54:51.993034 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-config-data" (OuterVolumeSpecName: "config-data") pod "73158450-c56d-45ec-a637-d46c435e81a0" (UID: "73158450-c56d-45ec-a637-d46c435e81a0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.009831 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "73158450-c56d-45ec-a637-d46c435e81a0" (UID: "73158450-c56d-45ec-a637-d46c435e81a0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.053435 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-config-data\") pod \"6ca41e27-112f-461c-9556-768eea0cbdf6\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.053581 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4crnr\" (UniqueName: \"kubernetes.io/projected/6ca41e27-112f-461c-9556-768eea0cbdf6-kube-api-access-4crnr\") pod \"6ca41e27-112f-461c-9556-768eea0cbdf6\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.053666 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ca41e27-112f-461c-9556-768eea0cbdf6-logs\") pod \"6ca41e27-112f-461c-9556-768eea0cbdf6\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.053735 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-combined-ca-bundle\") pod \"6ca41e27-112f-461c-9556-768eea0cbdf6\" (UID: \"6ca41e27-112f-461c-9556-768eea0cbdf6\") " Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.054202 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ca41e27-112f-461c-9556-768eea0cbdf6-logs" (OuterVolumeSpecName: "logs") pod "6ca41e27-112f-461c-9556-768eea0cbdf6" (UID: "6ca41e27-112f-461c-9556-768eea0cbdf6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.055303 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.055406 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xk2t\" (UniqueName: \"kubernetes.io/projected/73158450-c56d-45ec-a637-d46c435e81a0-kube-api-access-2xk2t\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.055470 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73158450-c56d-45ec-a637-d46c435e81a0-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.055539 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ca41e27-112f-461c-9556-768eea0cbdf6-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.057875 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ca41e27-112f-461c-9556-768eea0cbdf6-kube-api-access-4crnr" (OuterVolumeSpecName: "kube-api-access-4crnr") pod "6ca41e27-112f-461c-9556-768eea0cbdf6" (UID: "6ca41e27-112f-461c-9556-768eea0cbdf6"). InnerVolumeSpecName "kube-api-access-4crnr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.094799 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-config-data" (OuterVolumeSpecName: "config-data") pod "6ca41e27-112f-461c-9556-768eea0cbdf6" (UID: "6ca41e27-112f-461c-9556-768eea0cbdf6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.098377 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ca41e27-112f-461c-9556-768eea0cbdf6" (UID: "6ca41e27-112f-461c-9556-768eea0cbdf6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.157535 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.157577 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca41e27-112f-461c-9556-768eea0cbdf6-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.157589 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4crnr\" (UniqueName: \"kubernetes.io/projected/6ca41e27-112f-461c-9556-768eea0cbdf6-kube-api-access-4crnr\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.485030 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:54:52 crc kubenswrapper[4900]: E0127 12:54:52.485417 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.520346 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ca41e27-112f-461c-9556-768eea0cbdf6","Type":"ContainerDied","Data":"28d9a70441d45d1d4866b522fe61e1b1691fa725b2001f0365f19f3aac8950e3"} Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.520371 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.520402 4900 scope.go:117] "RemoveContainer" containerID="9aec7db7488768345eb7e9451a0947238ec199e2ac9619425cb105cf10bf67ba" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.525083 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"73158450-c56d-45ec-a637-d46c435e81a0","Type":"ContainerDied","Data":"6a7752382091b366cbbcb0b44f0c38471593c1983a7cc52cd122608a92567e3e"} Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.525209 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.581180 4900 scope.go:117] "RemoveContainer" containerID="20dd8acb9a403c81b8724c37e6493d41bfa2fe5161dcb476bc2711af27a46021" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.597407 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.620883 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.672127 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.684022 4900 scope.go:117] "RemoveContainer" containerID="82e5445b660704009749019a5db8dd12bf97a9baa5180f1b1b3675d38629f52b" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.712723 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.737129 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 12:54:52 crc kubenswrapper[4900]: E0127 12:54:52.737801 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerName="nova-metadata-metadata" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.737823 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerName="nova-metadata-metadata" Jan 27 12:54:52 crc kubenswrapper[4900]: E0127 12:54:52.737841 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73158450-c56d-45ec-a637-d46c435e81a0" containerName="nova-cell1-novncproxy-novncproxy" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.737848 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="73158450-c56d-45ec-a637-d46c435e81a0" containerName="nova-cell1-novncproxy-novncproxy" Jan 27 12:54:52 crc kubenswrapper[4900]: E0127 12:54:52.737891 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerName="nova-metadata-log" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.737898 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerName="nova-metadata-log" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.738129 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerName="nova-metadata-log" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.738143 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="73158450-c56d-45ec-a637-d46c435e81a0" containerName="nova-cell1-novncproxy-novncproxy" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.738164 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" containerName="nova-metadata-metadata" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.739130 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.741973 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.742424 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.742558 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.767274 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.786813 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.788017 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.788088 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.788265 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztzzm\" (UniqueName: \"kubernetes.io/projected/1437f600-6b08-4386-b558-49c7fd39e118-kube-api-access-ztzzm\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.788314 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.788450 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.791667 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.795598 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.798824 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.814278 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.891279 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-config-data\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.891549 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.891639 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.891763 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztzzm\" (UniqueName: \"kubernetes.io/projected/1437f600-6b08-4386-b558-49c7fd39e118-kube-api-access-ztzzm\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.891796 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.891851 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-logs\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.891963 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.892002 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph97t\" (UniqueName: \"kubernetes.io/projected/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-kube-api-access-ph97t\") pod \"nova-metadata-0\" (UID: 
\"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.892134 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.892328 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.898764 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.899078 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.902086 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.903353 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/1437f600-6b08-4386-b558-49c7fd39e118-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.917271 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztzzm\" (UniqueName: \"kubernetes.io/projected/1437f600-6b08-4386-b558-49c7fd39e118-kube-api-access-ztzzm\") pod \"nova-cell1-novncproxy-0\" (UID: \"1437f600-6b08-4386-b558-49c7fd39e118\") " pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.995866 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-config-data\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.996120 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-logs\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.996222 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.996257 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph97t\" (UniqueName: \"kubernetes.io/projected/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-kube-api-access-ph97t\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.996361 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:52 crc kubenswrapper[4900]: I0127 12:54:52.996918 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-logs\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:53 crc kubenswrapper[4900]: I0127 12:54:53.000290 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:53 crc kubenswrapper[4900]: I0127 12:54:53.000513 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:53 crc kubenswrapper[4900]: I0127 12:54:53.003623 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-config-data\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:53 crc kubenswrapper[4900]: I0127 12:54:53.025415 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph97t\" (UniqueName: \"kubernetes.io/projected/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-kube-api-access-ph97t\") pod \"nova-metadata-0\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " pod="openstack/nova-metadata-0" Jan 27 12:54:53 crc kubenswrapper[4900]: I0127 12:54:53.064245 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:54:53 crc kubenswrapper[4900]: I0127 12:54:53.118588 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:54:53 crc kubenswrapper[4900]: W0127 12:54:53.703126 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1437f600_6b08_4386_b558_49c7fd39e118.slice/crio-c47d4739edbd00d3ea9c5a3425c0c42f611c74ae79739f845bf4afd550f1b811 WatchSource:0}: Error finding container c47d4739edbd00d3ea9c5a3425c0c42f611c74ae79739f845bf4afd550f1b811: Status 404 returned error can't find the container with id c47d4739edbd00d3ea9c5a3425c0c42f611c74ae79739f845bf4afd550f1b811 Jan 27 12:54:53 crc kubenswrapper[4900]: I0127 12:54:53.704739 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 27 12:54:53 crc kubenswrapper[4900]: I0127 12:54:53.863968 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:54:53 crc kubenswrapper[4900]: W0127 12:54:53.883226 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6fcc6fd_95bc_4e8a_896f_4a60268a9055.slice/crio-b7374ed198199be6dfefea4afbb39b9cd31ba1231562237b612bead9a34cf5fa WatchSource:0}: Error finding container b7374ed198199be6dfefea4afbb39b9cd31ba1231562237b612bead9a34cf5fa: Status 404 returned error can't find the container with id b7374ed198199be6dfefea4afbb39b9cd31ba1231562237b612bead9a34cf5fa Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.267809 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.268538 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="1eab4b44-ee86-4d03-99dc-ca014f5c7141" containerName="kube-state-metrics" containerID="cri-o://5ada64f44fa5b78970a0628bad9e8de2ccda0971814026d6d4d1a306611eedc5" gracePeriod=30 Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.431228 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.431891 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="2b8ccf3d-f0c3-4607-a421-da50e552cecf" containerName="mysqld-exporter" containerID="cri-o://3e519ecf3229697af080a0ed0c48694f3aba5f1d04da13b3b61fe36084aa42e9" gracePeriod=30 Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.511987 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ca41e27-112f-461c-9556-768eea0cbdf6" path="/var/lib/kubelet/pods/6ca41e27-112f-461c-9556-768eea0cbdf6/volumes" Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.512978 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73158450-c56d-45ec-a637-d46c435e81a0" path="/var/lib/kubelet/pods/73158450-c56d-45ec-a637-d46c435e81a0/volumes" Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.627657 4900 generic.go:334] "Generic (PLEG): container finished" podID="1eab4b44-ee86-4d03-99dc-ca014f5c7141" containerID="5ada64f44fa5b78970a0628bad9e8de2ccda0971814026d6d4d1a306611eedc5" exitCode=2 Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.627744 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1eab4b44-ee86-4d03-99dc-ca014f5c7141","Type":"ContainerDied","Data":"5ada64f44fa5b78970a0628bad9e8de2ccda0971814026d6d4d1a306611eedc5"} Jan 27 12:54:54 crc 
kubenswrapper[4900]: I0127 12:54:54.629261 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1437f600-6b08-4386-b558-49c7fd39e118","Type":"ContainerStarted","Data":"40e483647387f9d15523dfb7920a122d1d2cade257f0e143c2590b109dd65526"} Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.629299 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1437f600-6b08-4386-b558-49c7fd39e118","Type":"ContainerStarted","Data":"c47d4739edbd00d3ea9c5a3425c0c42f611c74ae79739f845bf4afd550f1b811"} Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.670818 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.6708027640000003 podStartE2EDuration="2.670802764s" podCreationTimestamp="2026-01-27 12:54:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:54.666100038 +0000 UTC m=+1721.903128258" watchObservedRunningTime="2026-01-27 12:54:54.670802764 +0000 UTC m=+1721.907830974" Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.685351 4900 generic.go:334] "Generic (PLEG): container finished" podID="2b8ccf3d-f0c3-4607-a421-da50e552cecf" containerID="3e519ecf3229697af080a0ed0c48694f3aba5f1d04da13b3b61fe36084aa42e9" exitCode=2 Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.685464 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"2b8ccf3d-f0c3-4607-a421-da50e552cecf","Type":"ContainerDied","Data":"3e519ecf3229697af080a0ed0c48694f3aba5f1d04da13b3b61fe36084aa42e9"} Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.738504 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b6fcc6fd-95bc-4e8a-896f-4a60268a9055","Type":"ContainerStarted","Data":"2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c"} Jan 27 12:54:54 crc kubenswrapper[4900]: I0127 12:54:54.738558 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b6fcc6fd-95bc-4e8a-896f-4a60268a9055","Type":"ContainerStarted","Data":"b7374ed198199be6dfefea4afbb39b9cd31ba1231562237b612bead9a34cf5fa"} Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.302531 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.467151 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.474746 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m54v6\" (UniqueName: \"kubernetes.io/projected/1eab4b44-ee86-4d03-99dc-ca014f5c7141-kube-api-access-m54v6\") pod \"1eab4b44-ee86-4d03-99dc-ca014f5c7141\" (UID: \"1eab4b44-ee86-4d03-99dc-ca014f5c7141\") " Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.501275 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1eab4b44-ee86-4d03-99dc-ca014f5c7141-kube-api-access-m54v6" (OuterVolumeSpecName: "kube-api-access-m54v6") pod "1eab4b44-ee86-4d03-99dc-ca014f5c7141" (UID: "1eab4b44-ee86-4d03-99dc-ca014f5c7141"). InnerVolumeSpecName "kube-api-access-m54v6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.578645 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-config-data\") pod \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.579267 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-combined-ca-bundle\") pod \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.579508 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hl95\" (UniqueName: \"kubernetes.io/projected/2b8ccf3d-f0c3-4607-a421-da50e552cecf-kube-api-access-7hl95\") pod \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\" (UID: \"2b8ccf3d-f0c3-4607-a421-da50e552cecf\") " Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.580631 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m54v6\" (UniqueName: \"kubernetes.io/projected/1eab4b44-ee86-4d03-99dc-ca014f5c7141-kube-api-access-m54v6\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.598440 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b8ccf3d-f0c3-4607-a421-da50e552cecf-kube-api-access-7hl95" (OuterVolumeSpecName: "kube-api-access-7hl95") pod "2b8ccf3d-f0c3-4607-a421-da50e552cecf" (UID: "2b8ccf3d-f0c3-4607-a421-da50e552cecf"). InnerVolumeSpecName "kube-api-access-7hl95". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.635002 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b8ccf3d-f0c3-4607-a421-da50e552cecf" (UID: "2b8ccf3d-f0c3-4607-a421-da50e552cecf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.666532 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-config-data" (OuterVolumeSpecName: "config-data") pod "2b8ccf3d-f0c3-4607-a421-da50e552cecf" (UID: "2b8ccf3d-f0c3-4607-a421-da50e552cecf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.683728 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hl95\" (UniqueName: \"kubernetes.io/projected/2b8ccf3d-f0c3-4607-a421-da50e552cecf-kube-api-access-7hl95\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.683776 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.683791 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8ccf3d-f0c3-4607-a421-da50e552cecf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.761724 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b6fcc6fd-95bc-4e8a-896f-4a60268a9055","Type":"ContainerStarted","Data":"5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5"} Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.771357 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1eab4b44-ee86-4d03-99dc-ca014f5c7141","Type":"ContainerDied","Data":"44df8d0b6f39308f51b152745144aa85a2f7bb8a43282debde0a9129f004755e"} Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.771408 4900 scope.go:117] "RemoveContainer" containerID="5ada64f44fa5b78970a0628bad9e8de2ccda0971814026d6d4d1a306611eedc5" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.771547 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.795277 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"2b8ccf3d-f0c3-4607-a421-da50e552cecf","Type":"ContainerDied","Data":"bdd13345afd793409172d7a1e58729fb2bdb25c28332a684eac78e63aa3113a1"} Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.795349 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.830768 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.830746897 podStartE2EDuration="3.830746897s" podCreationTimestamp="2026-01-27 12:54:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:54:55.808980856 +0000 UTC m=+1723.046009076" watchObservedRunningTime="2026-01-27 12:54:55.830746897 +0000 UTC m=+1723.067775107" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.858749 4900 scope.go:117] "RemoveContainer" containerID="3e519ecf3229697af080a0ed0c48694f3aba5f1d04da13b3b61fe36084aa42e9" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.870024 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.899652 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.932938 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Jan 27 12:54:55 crc kubenswrapper[4900]: E0127 12:54:55.933931 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8ccf3d-f0c3-4607-a421-da50e552cecf" containerName="mysqld-exporter" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.933959 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8ccf3d-f0c3-4607-a421-da50e552cecf" containerName="mysqld-exporter" Jan 27 12:54:55 crc kubenswrapper[4900]: E0127 12:54:55.933971 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eab4b44-ee86-4d03-99dc-ca014f5c7141" containerName="kube-state-metrics" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.933980 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eab4b44-ee86-4d03-99dc-ca014f5c7141" containerName="kube-state-metrics" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.934301 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8ccf3d-f0c3-4607-a421-da50e552cecf" containerName="mysqld-exporter" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.934343 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eab4b44-ee86-4d03-99dc-ca014f5c7141" containerName="kube-state-metrics" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.935933 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.939487 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.939637 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.961369 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.962024 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.966838 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.966923 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.970141 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.997498 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7h9r\" (UniqueName: \"kubernetes.io/projected/48f40cde-5734-4689-9d38-0ebcb2099b1b-kube-api-access-l7h9r\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.997755 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.997805 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-config-data\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:55 crc kubenswrapper[4900]: I0127 12:54:55.998357 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.002153 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.015087 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.027270 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.029710 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.031793 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.032254 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.041277 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.100978 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7h9r\" (UniqueName: \"kubernetes.io/projected/48f40cde-5734-4689-9d38-0ebcb2099b1b-kube-api-access-l7h9r\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.101091 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.101607 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.101661 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-config-data\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.101684 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.101829 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.101856 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.101886 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fkcx\" (UniqueName: 
\"kubernetes.io/projected/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-api-access-4fkcx\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.107181 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.107491 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.107918 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48f40cde-5734-4689-9d38-0ebcb2099b1b-config-data\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.120683 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7h9r\" (UniqueName: \"kubernetes.io/projected/48f40cde-5734-4689-9d38-0ebcb2099b1b-kube-api-access-l7h9r\") pod \"mysqld-exporter-0\" (UID: \"48f40cde-5734-4689-9d38-0ebcb2099b1b\") " pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.204812 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.204931 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.204955 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fkcx\" (UniqueName: \"kubernetes.io/projected/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-api-access-4fkcx\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.205144 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.210022 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: 
\"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.210044 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.210260 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.221668 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fkcx\" (UniqueName: \"kubernetes.io/projected/49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b-kube-api-access-4fkcx\") pod \"kube-state-metrics-0\" (UID: \"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b\") " pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.243560 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.262796 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.312662 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-config-data\") pod \"182248e0-13c8-4d04-bfd2-f15598f4820f\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.312813 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-combined-ca-bundle\") pod \"182248e0-13c8-4d04-bfd2-f15598f4820f\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.313080 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhf7g\" (UniqueName: \"kubernetes.io/projected/182248e0-13c8-4d04-bfd2-f15598f4820f-kube-api-access-lhf7g\") pod \"182248e0-13c8-4d04-bfd2-f15598f4820f\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.313182 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-scripts\") pod \"182248e0-13c8-4d04-bfd2-f15598f4820f\" (UID: \"182248e0-13c8-4d04-bfd2-f15598f4820f\") " Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.328915 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/182248e0-13c8-4d04-bfd2-f15598f4820f-kube-api-access-lhf7g" (OuterVolumeSpecName: "kube-api-access-lhf7g") pod "182248e0-13c8-4d04-bfd2-f15598f4820f" (UID: "182248e0-13c8-4d04-bfd2-f15598f4820f"). InnerVolumeSpecName "kube-api-access-lhf7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.361614 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.372264 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-scripts" (OuterVolumeSpecName: "scripts") pod "182248e0-13c8-4d04-bfd2-f15598f4820f" (UID: "182248e0-13c8-4d04-bfd2-f15598f4820f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.421016 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhf7g\" (UniqueName: \"kubernetes.io/projected/182248e0-13c8-4d04-bfd2-f15598f4820f-kube-api-access-lhf7g\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.421980 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.529795 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1eab4b44-ee86-4d03-99dc-ca014f5c7141" path="/var/lib/kubelet/pods/1eab4b44-ee86-4d03-99dc-ca014f5c7141/volumes" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.530773 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b8ccf3d-f0c3-4607-a421-da50e552cecf" path="/var/lib/kubelet/pods/2b8ccf3d-f0c3-4607-a421-da50e552cecf/volumes" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.576644 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-config-data" (OuterVolumeSpecName: "config-data") pod "182248e0-13c8-4d04-bfd2-f15598f4820f" (UID: "182248e0-13c8-4d04-bfd2-f15598f4820f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.609349 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "182248e0-13c8-4d04-bfd2-f15598f4820f" (UID: "182248e0-13c8-4d04-bfd2-f15598f4820f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.630701 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.630738 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182248e0-13c8-4d04-bfd2-f15598f4820f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.831349 4900 generic.go:334] "Generic (PLEG): container finished" podID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerID="9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24" exitCode=137 Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.831422 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerDied","Data":"9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24"} Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.831680 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"182248e0-13c8-4d04-bfd2-f15598f4820f","Type":"ContainerDied","Data":"a9bc195e68284cefbbebac2043aeb7d03c3f297288a8ae5a1bf7bf0d25c9492e"} Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.831517 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.831714 4900 scope.go:117] "RemoveContainer" containerID="9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.832642 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.916349 4900 scope.go:117] "RemoveContainer" containerID="b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662" Jan 27 12:54:56 crc kubenswrapper[4900]: I0127 12:54:56.916947 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.017942 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.058726 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.087132 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.100010 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 27 12:54:57 crc kubenswrapper[4900]: E0127 12:54:57.100703 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-evaluator" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.100725 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-evaluator" Jan 27 12:54:57 crc kubenswrapper[4900]: E0127 12:54:57.100752 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-listener" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.100760 4900 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-listener" Jan 27 12:54:57 crc kubenswrapper[4900]: E0127 12:54:57.100772 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-notifier" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.100778 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-notifier" Jan 27 12:54:57 crc kubenswrapper[4900]: E0127 12:54:57.100789 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-api" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.100795 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-api" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.101021 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-listener" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.101041 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-notifier" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.101072 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-evaluator" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.101083 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" containerName="aodh-api" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.108021 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.114142 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.114334 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-kgljt" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.114580 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.114699 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.114826 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.121781 4900 scope.go:117] "RemoveContainer" containerID="51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.145740 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.209825 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.255495 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-config-data\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.255824 
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.255824 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.255881 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-scripts\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.255930 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-internal-tls-certs\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.255950 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksvfv\" (UniqueName: \"kubernetes.io/projected/8cea539e-f7a9-4e80-bc7b-1645865568ba-kube-api-access-ksvfv\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.255978 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-public-tls-certs\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.366035 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-config-data\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.366134 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.366196 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-scripts\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.366467 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksvfv\" (UniqueName: \"kubernetes.io/projected/8cea539e-f7a9-4e80-bc7b-1645865568ba-kube-api-access-ksvfv\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.366520 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-internal-tls-certs\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
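The VerifyControllerAttachedVolume / MountVolume pairs above are the kubelet volume manager's reconciler at work: it compares a desired state of the world (the volumes the new aodh-0 pod needs) against the actual state (what is attached and mounted) and emits one operation per difference. A toy version of that diff, assuming plain string-keyed sets rather than kubelet's real types:

package main

import "fmt"

// Toy reconciler: one operation per difference between desired and actual.
func reconcile(desired, actual map[string]bool) {
	for v := range desired {
		if !actual[v] {
			fmt.Println("operationExecutor.MountVolume started for volume", v)
		}
	}
	for v := range actual {
		if !desired[v] {
			fmt.Println("operationExecutor.UnmountVolume started for volume", v)
		}
	}
}

func main() {
	desired := map[string]bool{"config-data": true, "scripts": true, "public-tls-certs": true}
	actual := map[string]bool{"scripts": true}
	reconcile(desired, actual) // mounts config-data and public-tls-certs
}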
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.366585 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-public-tls-certs\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.381796 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-public-tls-certs\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.382305 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-config-data\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.382315 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.383737 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-internal-tls-certs\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.389976 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-scripts\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.419929 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksvfv\" (UniqueName: \"kubernetes.io/projected/8cea539e-f7a9-4e80-bc7b-1645865568ba-kube-api-access-ksvfv\") pod \"aodh-0\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " pod="openstack/aodh-0"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.563588 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-xkbsb"]
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.565992 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb"
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.579491 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-xkbsb"] Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.708704 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-config\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.708757 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.708871 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5ds9\" (UniqueName: \"kubernetes.io/projected/8b0296e5-d686-4e51-9753-b4a09e72183e-kube-api-access-x5ds9\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.708993 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.709027 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.709073 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: W0127 12:54:57.820829 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49d2fe9a_7b2a_45a1_a0ed_31f0e7ab515b.slice/crio-e6c0f5de9d1c58a90150f39ee441464c0184e8792c12054441b9fe75243a112d WatchSource:0}: Error finding container e6c0f5de9d1c58a90150f39ee441464c0184e8792c12054441b9fe75243a112d: Status 404 returned error can't find the container with id e6c0f5de9d1c58a90150f39ee441464c0184e8792c12054441b9fe75243a112d Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.830770 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-config\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " 
pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.830841 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.831020 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5ds9\" (UniqueName: \"kubernetes.io/projected/8b0296e5-d686-4e51-9753-b4a09e72183e-kube-api-access-x5ds9\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.831203 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.831280 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.831327 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.832033 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-config\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.852926 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.854608 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.855652 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.861499 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.906947 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"48f40cde-5734-4689-9d38-0ebcb2099b1b","Type":"ContainerStarted","Data":"809076276b64faa8abedb2b9f4e1c6dbec25fe10225b2f9e306f343b0354be42"}
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.911548 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5ds9\" (UniqueName: \"kubernetes.io/projected/8b0296e5-d686-4e51-9753-b4a09e72183e-kube-api-access-x5ds9\") pod \"dnsmasq-dns-79b5d74c8c-xkbsb\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb"
Jan 27 12:54:57 crc kubenswrapper[4900]: I0127 12:54:57.915642 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b","Type":"ContainerStarted","Data":"e6c0f5de9d1c58a90150f39ee441464c0184e8792c12054441b9fe75243a112d"}
Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.066182 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Jan 27 12:54:58 crc kubenswrapper[4900]: E0127 12:54:58.072174 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod182248e0_13c8_4d04_bfd2_f15598f4820f.slice/crio-a9bc195e68284cefbbebac2043aeb7d03c3f297288a8ae5a1bf7bf0d25c9492e\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod182248e0_13c8_4d04_bfd2_f15598f4820f.slice\": RecentStats: unable to find data in memory cache]"
Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.108651 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Need to start a new one" pod="openstack/aodh-0" Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.118689 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.122786 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.146346 4900 scope.go:117] "RemoveContainer" containerID="9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24" Jan 27 12:54:58 crc kubenswrapper[4900]: E0127 12:54:58.146881 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24\": container with ID starting with 9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24 not found: ID does not exist" containerID="9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24" Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.146928 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24"} err="failed to get container status \"9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24\": rpc error: code = NotFound desc = could not find container \"9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24\": container with ID starting with 9d109b504e305d5e427b44b94697f7a7eb084a8f73dc477bac3c6a96fe7f2c24 not found: ID does not exist" Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.146966 4900 scope.go:117] "RemoveContainer" containerID="b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662" Jan 27 12:54:58 crc kubenswrapper[4900]: E0127 12:54:58.147379 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662\": container with ID starting with b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662 not found: ID does not exist" containerID="b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662" Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.147410 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662"} err="failed to get container status \"b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662\": rpc error: code = NotFound desc = could not find container \"b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662\": container with ID starting with b83ebc7ad00e2b1e70dd33ea59f5b764b73be85e089840b8deef1184a993e662 not found: ID does not exist" Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.147424 4900 scope.go:117] "RemoveContainer" containerID="51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66" Jan 27 12:54:58 crc kubenswrapper[4900]: E0127 12:54:58.151457 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66\": container with ID starting with 51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66 not found: ID does not exist" containerID="51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66" Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 
Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.151527 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66"} err="failed to get container status \"51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66\": rpc error: code = NotFound desc = could not find container \"51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66\": container with ID starting with 51eca2052983c39961bc7495089e560c7e1709bfe6705702c6bc3626f75eda66 not found: ID does not exist"
Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.151568 4900 scope.go:117] "RemoveContainer" containerID="9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502"
Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.152199 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb"
Jan 27 12:54:58 crc kubenswrapper[4900]: E0127 12:54:58.153112 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502\": container with ID starting with 9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502 not found: ID does not exist" containerID="9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502"
Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.153146 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502"} err="failed to get container status \"9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502\": rpc error: code = NotFound desc = could not find container \"9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502\": container with ID starting with 9b2d326cc5f72dd2f0daf6701cec45ae1e9287de6f5d4fae613b3490b7c16502 not found: ID does not exist"
Jan 27 12:54:58 crc kubenswrapper[4900]: I0127 12:54:58.592239 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="182248e0-13c8-4d04-bfd2-f15598f4820f" path="/var/lib/kubelet/pods/182248e0-13c8-4d04-bfd2-f15598f4820f/volumes"
Jan 27 12:54:59 crc kubenswrapper[4900]: I0127 12:54:58.999869 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"48f40cde-5734-4689-9d38-0ebcb2099b1b","Type":"ContainerStarted","Data":"409d72d70b373ccd19078e9b6e850d33fc9bcb64f09de9fbe89176fbfc721eed"}
Jan 27 12:54:59 crc kubenswrapper[4900]: I0127 12:54:59.077930 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=3.383012145 podStartE2EDuration="4.077908125s" podCreationTimestamp="2026-01-27 12:54:55 +0000 UTC" firstStartedPulling="2026-01-27 12:54:57.030413472 +0000 UTC m=+1724.267441682" lastFinishedPulling="2026-01-27 12:54:57.725309452 +0000 UTC m=+1724.962337662" observedRunningTime="2026-01-27 12:54:59.042406745 +0000 UTC m=+1726.279434955" watchObservedRunningTime="2026-01-27 12:54:59.077908125 +0000 UTC m=+1726.314936335"
Jan 27 12:54:59 crc kubenswrapper[4900]: I0127 12:54:59.362492 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
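The mysqld-exporter-0 startup-duration entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (12:54:59.077908125 - 12:54:55 = 4.077908125s), and podStartSLOduration subtracts the image-pull window (lastFinishedPulling - firstStartedPulling = 0.694895980s), giving 3.383012145. Re-deriving the numbers in Go:

package main

import (
	"fmt"
	"time"
)

const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-27 12:54:55 +0000 UTC")
	firstPull := mustParse("2026-01-27 12:54:57.030413472 +0000 UTC")
	lastPull := mustParse("2026-01-27 12:54:57.725309452 +0000 UTC")
	running := mustParse("2026-01-27 12:54:59.077908125 +0000 UTC")

	e2e := running.Sub(created)          // podStartE2EDuration: 4.077908125s
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration: 3.383012145s
	fmt.Println(e2e, slo)
}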
Jan 27 12:54:59 crc kubenswrapper[4900]: W0127 12:54:59.378771 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8cea539e_f7a9_4e80_bc7b_1645865568ba.slice/crio-b652040978129f123b3f14b3f7d95cdb79944e53b79156c3a609174af7ddbaa1 WatchSource:0}: Error finding container b652040978129f123b3f14b3f7d95cdb79944e53b79156c3a609174af7ddbaa1: Status 404 returned error can't find the container with id b652040978129f123b3f14b3f7d95cdb79944e53b79156c3a609174af7ddbaa1
Jan 27 12:54:59 crc kubenswrapper[4900]: I0127 12:54:59.389679 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-xkbsb"]
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.057693 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerStarted","Data":"b652040978129f123b3f14b3f7d95cdb79944e53b79156c3a609174af7ddbaa1"}
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.064765 4900 generic.go:334] "Generic (PLEG): container finished" podID="8b0296e5-d686-4e51-9753-b4a09e72183e" containerID="bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0" exitCode=0
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.064863 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" event={"ID":"8b0296e5-d686-4e51-9753-b4a09e72183e","Type":"ContainerDied","Data":"bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0"}
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.064900 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" event={"ID":"8b0296e5-d686-4e51-9753-b4a09e72183e","Type":"ContainerStarted","Data":"0870b957adcf7b11010e514bfdfde1e069f2dee55a4d79108d731a61bd4885ef"}
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.078742 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b","Type":"ContainerStarted","Data":"6ef98cbb012f7b1dae72713b75c8c46da18e34f17adf08fd8813679b2aeb0d54"}
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.078842 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.149769 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=4.689943561 podStartE2EDuration="5.149741391s" podCreationTimestamp="2026-01-27 12:54:55 +0000 UTC" firstStartedPulling="2026-01-27 12:54:57.853961995 +0000 UTC m=+1725.090990205" lastFinishedPulling="2026-01-27 12:54:58.313759825 +0000 UTC m=+1725.550788035" observedRunningTime="2026-01-27 12:55:00.141986016 +0000 UTC m=+1727.379014226" watchObservedRunningTime="2026-01-27 12:55:00.149741391 +0000 UTC m=+1727.386769601"
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.220567 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.220959 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="ceilometer-central-agent" containerID="cri-o://a74c693832fccbd0c134ca600dcc21213cf57e2ea4e26eea0ea9072e40e0874a" gracePeriod=30
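The "Killing container with a grace period ... gracePeriod=30" entries are the standard two-step stop: SIGTERM first, SIGKILL if the container is still alive when the grace period expires, which is how the exit codes 143 and 137 elsewhere in this log come about. A self-contained sketch of that shape (not kubelet's implementation, which drives the CRI rather than a local process):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func stopWithGrace(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	cmd.Process.Signal(syscall.SIGTERM) // polite request first
	select {
	case <-done:
		fmt.Println("exited within grace period (expect status 143)")
	case <-time.After(grace):
		cmd.Process.Kill() // SIGKILL once the grace period runs out (status 137)
		<-done
		fmt.Println("force-killed after grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "300")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	stopWithGrace(cmd, 30*time.Second)
}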
podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="proxy-httpd" containerID="cri-o://c052ead14ba9fc79c26767e2aba1acaf5a6d0e3e406e31242145e429fe21e451" gracePeriod=30 Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.221668 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="ceilometer-notification-agent" containerID="cri-o://4b20bd641d9bcd1c9494f4e55c3e92935ab4cfb59d02fc7b56f79dfbc53f1a4b" gracePeriod=30 Jan 27 12:55:00 crc kubenswrapper[4900]: I0127 12:55:00.221747 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="sg-core" containerID="cri-o://d8a2e50f8fd6d787bc58416c0f2aa1d34e25aaa6898c22ac1e748213e7ff8d6f" gracePeriod=30 Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.094532 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerStarted","Data":"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4"} Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.097084 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" event={"ID":"8b0296e5-d686-4e51-9753-b4a09e72183e","Type":"ContainerStarted","Data":"49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc"} Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.097929 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.100339 4900 generic.go:334] "Generic (PLEG): container finished" podID="00248c3f-554f-4360-aad4-9dd642eccf99" containerID="c052ead14ba9fc79c26767e2aba1acaf5a6d0e3e406e31242145e429fe21e451" exitCode=0 Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.100368 4900 generic.go:334] "Generic (PLEG): container finished" podID="00248c3f-554f-4360-aad4-9dd642eccf99" containerID="d8a2e50f8fd6d787bc58416c0f2aa1d34e25aaa6898c22ac1e748213e7ff8d6f" exitCode=2 Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.100375 4900 generic.go:334] "Generic (PLEG): container finished" podID="00248c3f-554f-4360-aad4-9dd642eccf99" containerID="a74c693832fccbd0c134ca600dcc21213cf57e2ea4e26eea0ea9072e40e0874a" exitCode=0 Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.100585 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerDied","Data":"c052ead14ba9fc79c26767e2aba1acaf5a6d0e3e406e31242145e429fe21e451"} Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.100695 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerDied","Data":"d8a2e50f8fd6d787bc58416c0f2aa1d34e25aaa6898c22ac1e748213e7ff8d6f"} Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.100773 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerDied","Data":"a74c693832fccbd0c134ca600dcc21213cf57e2ea4e26eea0ea9072e40e0874a"} Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.127866 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" podStartSLOduration=4.127841468 
podStartE2EDuration="4.127841468s" podCreationTimestamp="2026-01-27 12:54:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:55:01.121080962 +0000 UTC m=+1728.358109172" watchObservedRunningTime="2026-01-27 12:55:01.127841468 +0000 UTC m=+1728.364869678" Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.335247 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.335855 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-log" containerID="cri-o://dcf6a9f92ebd78897491be3f732f93592e6f3c92233ab7514aca5ff7f72deffc" gracePeriod=30 Jan 27 12:55:01 crc kubenswrapper[4900]: I0127 12:55:01.335991 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-api" containerID="cri-o://570bc83dfa93ce18b751df1767c0525333d129f59da553f5bdc530e0817d0912" gracePeriod=30 Jan 27 12:55:02 crc kubenswrapper[4900]: I0127 12:55:02.116484 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerStarted","Data":"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938"} Jan 27 12:55:02 crc kubenswrapper[4900]: I0127 12:55:02.116891 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerStarted","Data":"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c"} Jan 27 12:55:02 crc kubenswrapper[4900]: I0127 12:55:02.119179 4900 generic.go:334] "Generic (PLEG): container finished" podID="db60a14a-4df5-4346-b556-b937aa93d253" containerID="dcf6a9f92ebd78897491be3f732f93592e6f3c92233ab7514aca5ff7f72deffc" exitCode=143 Jan 27 12:55:02 crc kubenswrapper[4900]: I0127 12:55:02.119465 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"db60a14a-4df5-4346-b556-b937aa93d253","Type":"ContainerDied","Data":"dcf6a9f92ebd78897491be3f732f93592e6f3c92233ab7514aca5ff7f72deffc"} Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.065295 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.100511 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.124532 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.124579 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.139228 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerStarted","Data":"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce"} Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.171830 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=3.9384146209999997 podStartE2EDuration="7.171807289s" 
podCreationTimestamp="2026-01-27 12:54:56 +0000 UTC" firstStartedPulling="2026-01-27 12:54:59.3817311 +0000 UTC m=+1726.618759310" lastFinishedPulling="2026-01-27 12:55:02.615123768 +0000 UTC m=+1729.852151978" observedRunningTime="2026-01-27 12:55:03.166732441 +0000 UTC m=+1730.403760681" watchObservedRunningTime="2026-01-27 12:55:03.171807289 +0000 UTC m=+1730.408835499" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.196922 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.507047 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-4rr45"] Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.509921 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.521880 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.529262 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.552369 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4rr45"] Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.694298 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-scripts\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.694541 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.694650 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2t2n\" (UniqueName: \"kubernetes.io/projected/5730e44a-3c94-46ba-8fd9-c659adcbfa31-kube-api-access-s2t2n\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.694771 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-config-data\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.799683 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.799777 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-s2t2n\" (UniqueName: \"kubernetes.io/projected/5730e44a-3c94-46ba-8fd9-c659adcbfa31-kube-api-access-s2t2n\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.799867 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-config-data\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.799928 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-scripts\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.812632 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.835772 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-scripts\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.835771 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2t2n\" (UniqueName: \"kubernetes.io/projected/5730e44a-3c94-46ba-8fd9-c659adcbfa31-kube-api-access-s2t2n\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.838666 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-config-data\") pod \"nova-cell1-cell-mapping-4rr45\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:03 crc kubenswrapper[4900]: I0127 12:55:03.852795 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:04 crc kubenswrapper[4900]: I0127 12:55:04.128412 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.0:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 12:55:04 crc kubenswrapper[4900]: I0127 12:55:04.133392 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.0:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 12:55:04 crc kubenswrapper[4900]: I0127 12:55:04.632039 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4rr45"] Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.296577 4900 generic.go:334] "Generic (PLEG): container finished" podID="db60a14a-4df5-4346-b556-b937aa93d253" containerID="570bc83dfa93ce18b751df1767c0525333d129f59da553f5bdc530e0817d0912" exitCode=0 Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.296946 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"db60a14a-4df5-4346-b556-b937aa93d253","Type":"ContainerDied","Data":"570bc83dfa93ce18b751df1767c0525333d129f59da553f5bdc530e0817d0912"} Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.321160 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4rr45" event={"ID":"5730e44a-3c94-46ba-8fd9-c659adcbfa31","Type":"ContainerStarted","Data":"07ddbdd240fe9824bb87f7d09a6e5a771abf22c7a943c7467ef989bdd7ae3131"} Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.485306 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:55:05 crc kubenswrapper[4900]: E0127 12:55:05.485661 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.634344 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.795947 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db60a14a-4df5-4346-b556-b937aa93d253-logs\") pod \"db60a14a-4df5-4346-b556-b937aa93d253\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.796272 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqqxf\" (UniqueName: \"kubernetes.io/projected/db60a14a-4df5-4346-b556-b937aa93d253-kube-api-access-nqqxf\") pod \"db60a14a-4df5-4346-b556-b937aa93d253\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.796309 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-combined-ca-bundle\") pod \"db60a14a-4df5-4346-b556-b937aa93d253\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.796406 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-config-data\") pod \"db60a14a-4df5-4346-b556-b937aa93d253\" (UID: \"db60a14a-4df5-4346-b556-b937aa93d253\") " Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.798707 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db60a14a-4df5-4346-b556-b937aa93d253-logs" (OuterVolumeSpecName: "logs") pod "db60a14a-4df5-4346-b556-b937aa93d253" (UID: "db60a14a-4df5-4346-b556-b937aa93d253"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.805709 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db60a14a-4df5-4346-b556-b937aa93d253-kube-api-access-nqqxf" (OuterVolumeSpecName: "kube-api-access-nqqxf") pod "db60a14a-4df5-4346-b556-b937aa93d253" (UID: "db60a14a-4df5-4346-b556-b937aa93d253"). InnerVolumeSpecName "kube-api-access-nqqxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.843486 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-config-data" (OuterVolumeSpecName: "config-data") pod "db60a14a-4df5-4346-b556-b937aa93d253" (UID: "db60a14a-4df5-4346-b556-b937aa93d253"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.891619 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db60a14a-4df5-4346-b556-b937aa93d253" (UID: "db60a14a-4df5-4346-b556-b937aa93d253"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.899588 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqqxf\" (UniqueName: \"kubernetes.io/projected/db60a14a-4df5-4346-b556-b937aa93d253-kube-api-access-nqqxf\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.899635 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.899646 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db60a14a-4df5-4346-b556-b937aa93d253-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:05 crc kubenswrapper[4900]: I0127 12:55:05.899655 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db60a14a-4df5-4346-b556-b937aa93d253-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.339020 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4rr45" event={"ID":"5730e44a-3c94-46ba-8fd9-c659adcbfa31","Type":"ContainerStarted","Data":"b878424c8310cb03da552e59b089977e9f157b5a3f749c70249626fc40aa3655"} Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.341993 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"db60a14a-4df5-4346-b556-b937aa93d253","Type":"ContainerDied","Data":"2796075f15466ccfd808bed5d62d2a7a7dfeb4f1fe0b6bf24ca35bb68b9c67da"} Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.342051 4900 scope.go:117] "RemoveContainer" containerID="570bc83dfa93ce18b751df1767c0525333d129f59da553f5bdc530e0817d0912" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.342179 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.353311 4900 generic.go:334] "Generic (PLEG): container finished" podID="00248c3f-554f-4360-aad4-9dd642eccf99" containerID="4b20bd641d9bcd1c9494f4e55c3e92935ab4cfb59d02fc7b56f79dfbc53f1a4b" exitCode=0 Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.353387 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerDied","Data":"4b20bd641d9bcd1c9494f4e55c3e92935ab4cfb59d02fc7b56f79dfbc53f1a4b"} Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.353444 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00248c3f-554f-4360-aad4-9dd642eccf99","Type":"ContainerDied","Data":"c529e3c72e9887c0a06ebf88322e927f860fe8e56f5f9723dd11d083fe53a9ce"} Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.353461 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c529e3c72e9887c0a06ebf88322e927f860fe8e56f5f9723dd11d083fe53a9ce" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.368810 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.379169 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-4rr45" podStartSLOduration=3.379142301 podStartE2EDuration="3.379142301s" podCreationTimestamp="2026-01-27 12:55:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:55:06.369086309 +0000 UTC m=+1733.606114509" watchObservedRunningTime="2026-01-27 12:55:06.379142301 +0000 UTC m=+1733.616170521" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.387179 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.391646 4900 scope.go:117] "RemoveContainer" containerID="dcf6a9f92ebd78897491be3f732f93592e6f3c92233ab7514aca5ff7f72deffc" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.399994 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.424836 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.442147 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:06 crc kubenswrapper[4900]: E0127 12:55:06.442706 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="ceilometer-notification-agent" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.442720 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="ceilometer-notification-agent" Jan 27 12:55:06 crc kubenswrapper[4900]: E0127 12:55:06.442742 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="proxy-httpd" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.442750 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="proxy-httpd" Jan 27 12:55:06 crc kubenswrapper[4900]: E0127 12:55:06.442769 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="ceilometer-central-agent" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.442775 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="ceilometer-central-agent" Jan 27 12:55:06 crc kubenswrapper[4900]: E0127 12:55:06.442803 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="sg-core" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.442809 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="sg-core" Jan 27 12:55:06 crc kubenswrapper[4900]: E0127 12:55:06.442820 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-log" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.442826 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-log" Jan 27 12:55:06 crc kubenswrapper[4900]: E0127 12:55:06.442842 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-api" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.442847 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-api" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.443072 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="ceilometer-central-agent" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.443088 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="sg-core" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.443101 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="proxy-httpd" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.443114 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-log" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.443124 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="db60a14a-4df5-4346-b556-b937aa93d253" containerName="nova-api-api" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.443137 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" containerName="ceilometer-notification-agent" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.444504 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.449783 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.450306 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.455295 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.517976 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-run-httpd\") pod \"00248c3f-554f-4360-aad4-9dd642eccf99\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.518034 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-config-data\") pod \"00248c3f-554f-4360-aad4-9dd642eccf99\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.518129 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-sg-core-conf-yaml\") pod \"00248c3f-554f-4360-aad4-9dd642eccf99\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.518181 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-log-httpd\") pod \"00248c3f-554f-4360-aad4-9dd642eccf99\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " Jan 27 
12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.518225 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-combined-ca-bundle\") pod \"00248c3f-554f-4360-aad4-9dd642eccf99\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.518346 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4p8l\" (UniqueName: \"kubernetes.io/projected/00248c3f-554f-4360-aad4-9dd642eccf99-kube-api-access-q4p8l\") pod \"00248c3f-554f-4360-aad4-9dd642eccf99\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.518380 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-scripts\") pod \"00248c3f-554f-4360-aad4-9dd642eccf99\" (UID: \"00248c3f-554f-4360-aad4-9dd642eccf99\") " Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.526707 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "00248c3f-554f-4360-aad4-9dd642eccf99" (UID: "00248c3f-554f-4360-aad4-9dd642eccf99"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.527560 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "00248c3f-554f-4360-aad4-9dd642eccf99" (UID: "00248c3f-554f-4360-aad4-9dd642eccf99"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.547826 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db60a14a-4df5-4346-b556-b937aa93d253" path="/var/lib/kubelet/pods/db60a14a-4df5-4346-b556-b937aa93d253/volumes" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.552163 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00248c3f-554f-4360-aad4-9dd642eccf99-kube-api-access-q4p8l" (OuterVolumeSpecName: "kube-api-access-q4p8l") pod "00248c3f-554f-4360-aad4-9dd642eccf99" (UID: "00248c3f-554f-4360-aad4-9dd642eccf99"). InnerVolumeSpecName "kube-api-access-q4p8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.588535 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-scripts" (OuterVolumeSpecName: "scripts") pod "00248c3f-554f-4360-aad4-9dd642eccf99" (UID: "00248c3f-554f-4360-aad4-9dd642eccf99"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.599691 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621019 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621093 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621276 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-public-tls-certs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621362 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-logs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621386 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j49p\" (UniqueName: \"kubernetes.io/projected/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-kube-api-access-2j49p\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621566 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-config-data\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621660 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4p8l\" (UniqueName: \"kubernetes.io/projected/00248c3f-554f-4360-aad4-9dd642eccf99-kube-api-access-q4p8l\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621674 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621683 4900 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.621690 4900 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00248c3f-554f-4360-aad4-9dd642eccf99-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.655218 4900 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "00248c3f-554f-4360-aad4-9dd642eccf99" (UID: "00248c3f-554f-4360-aad4-9dd642eccf99"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.683366 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00248c3f-554f-4360-aad4-9dd642eccf99" (UID: "00248c3f-554f-4360-aad4-9dd642eccf99"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.724703 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-public-tls-certs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.724859 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-logs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.724890 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j49p\" (UniqueName: \"kubernetes.io/projected/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-kube-api-access-2j49p\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.724993 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-config-data\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.725182 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.725209 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.725458 4900 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.725477 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.728658 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-logs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.735205 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.738244 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.738491 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-public-tls-certs\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.759696 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j49p\" (UniqueName: \"kubernetes.io/projected/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-kube-api-access-2j49p\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.759770 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-config-data\") pod \"nova-api-0\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.787872 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-config-data" (OuterVolumeSpecName: "config-data") pod "00248c3f-554f-4360-aad4-9dd642eccf99" (UID: "00248c3f-554f-4360-aad4-9dd642eccf99"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.815150 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:55:06 crc kubenswrapper[4900]: I0127 12:55:06.827642 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00248c3f-554f-4360-aad4-9dd642eccf99-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.369097 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.416387 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.439255 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.456189 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.462889 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.466167 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.466453 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.466585 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.482316 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.565416 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.671762 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-config-data\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.671914 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.672153 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.673736 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.673994 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-scripts\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.674037 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-run-httpd\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.674186 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwflj\" (UniqueName: \"kubernetes.io/projected/b4b3481e-e421-4a42-aa33-8327e969136a-kube-api-access-vwflj\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.674327 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-log-httpd\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.778176 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.778617 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.778946 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.779110 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-scripts\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.779144 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-run-httpd\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.779171 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwflj\" (UniqueName: \"kubernetes.io/projected/b4b3481e-e421-4a42-aa33-8327e969136a-kube-api-access-vwflj\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.779380 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-log-httpd\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.779686 4900 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-config-data\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.779868 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-run-httpd\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.780478 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-log-httpd\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.786236 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.792259 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-config-data\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.794661 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.800486 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.803421 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwflj\" (UniqueName: \"kubernetes.io/projected/b4b3481e-e421-4a42-aa33-8327e969136a-kube-api-access-vwflj\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:07 crc kubenswrapper[4900]: I0127 12:55:07.804521 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-scripts\") pod \"ceilometer-0\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " pod="openstack/ceilometer-0" Jan 27 12:55:08 crc kubenswrapper[4900]: I0127 12:55:08.095770 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:55:08 crc kubenswrapper[4900]: I0127 12:55:08.167352 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:55:08 crc kubenswrapper[4900]: I0127 12:55:08.350982 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-np784"] Jan 27 12:55:08 crc kubenswrapper[4900]: I0127 12:55:08.351589 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" podUID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" containerName="dnsmasq-dns" containerID="cri-o://3cf53e8905946af7fd7264c27f6202978992cbf0dcfbc7eac50b1ce426cdfbc5" gracePeriod=10 Jan 27 12:55:08 crc kubenswrapper[4900]: I0127 12:55:08.458944 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70","Type":"ContainerStarted","Data":"29798a23552d2e524b0546cb95c61e05fd9010a103bb37b896e69699ef73bca2"} Jan 27 12:55:08 crc kubenswrapper[4900]: I0127 12:55:08.459010 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70","Type":"ContainerStarted","Data":"a40cf3efd661aa162db755fd7bc58ab4a0b1b15b1ec7971f6bd90f61e4595bca"} Jan 27 12:55:08 crc kubenswrapper[4900]: I0127 12:55:08.723609 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00248c3f-554f-4360-aad4-9dd642eccf99" path="/var/lib/kubelet/pods/00248c3f-554f-4360-aad4-9dd642eccf99/volumes" Jan 27 12:55:09 crc kubenswrapper[4900]: E0127 12:55:09.021476 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc6fa3fb_0ecf_4b99_a61c_6c448856afbc.slice/crio-3cf53e8905946af7fd7264c27f6202978992cbf0dcfbc7eac50b1ce426cdfbc5.scope\": RecentStats: unable to find data in memory cache]" Jan 27 12:55:09 crc kubenswrapper[4900]: I0127 12:55:09.149474 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:55:09 crc kubenswrapper[4900]: I0127 12:55:09.476264 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerStarted","Data":"3ec12ad556857fa65e6e86aaa472059dc6f02b6388d54a2c9e8cfe67effb81fe"} Jan 27 12:55:09 crc kubenswrapper[4900]: I0127 12:55:09.481734 4900 generic.go:334] "Generic (PLEG): container finished" podID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" containerID="3cf53e8905946af7fd7264c27f6202978992cbf0dcfbc7eac50b1ce426cdfbc5" exitCode=0 Jan 27 12:55:09 crc kubenswrapper[4900]: I0127 12:55:09.481794 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" event={"ID":"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc","Type":"ContainerDied","Data":"3cf53e8905946af7fd7264c27f6202978992cbf0dcfbc7eac50b1ce426cdfbc5"} Jan 27 12:55:09 crc kubenswrapper[4900]: I0127 12:55:09.491682 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70","Type":"ContainerStarted","Data":"3080d71c656e2f1867e2574e4f20179a1ea8e26ef0d1bc7b000a71f0b935117e"} Jan 27 12:55:09 crc kubenswrapper[4900]: I0127 12:55:09.536645 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.536623357 
podStartE2EDuration="3.536623357s" podCreationTimestamp="2026-01-27 12:55:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:55:09.513747113 +0000 UTC m=+1736.750775323" watchObservedRunningTime="2026-01-27 12:55:09.536623357 +0000 UTC m=+1736.773651567" Jan 27 12:55:09 crc kubenswrapper[4900]: I0127 12:55:09.927212 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.098047 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-config\") pod \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.098130 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-nb\") pod \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.098490 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-sb\") pod \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.098541 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-swift-storage-0\") pod \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.098582 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-svc\") pod \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.098684 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkzzv\" (UniqueName: \"kubernetes.io/projected/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-kube-api-access-lkzzv\") pod \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\" (UID: \"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc\") " Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.110576 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-kube-api-access-lkzzv" (OuterVolumeSpecName: "kube-api-access-lkzzv") pod "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" (UID: "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc"). InnerVolumeSpecName "kube-api-access-lkzzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.167583 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-config" (OuterVolumeSpecName: "config") pod "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" (UID: "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.178497 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" (UID: "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.186738 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" (UID: "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.192315 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" (UID: "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.194130 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" (UID: "dc6fa3fb-0ecf-4b99-a61c-6c448856afbc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.202647 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.202692 4900 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.202713 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.202723 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkzzv\" (UniqueName: \"kubernetes.io/projected/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-kube-api-access-lkzzv\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.202733 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.202741 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.504312 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-5fbc4d444f-np784" event={"ID":"dc6fa3fb-0ecf-4b99-a61c-6c448856afbc","Type":"ContainerDied","Data":"a354d11ef1d81bc57e38d77d86c28d945d09a1fb6670012708698e9567c3e22a"} Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.504346 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-np784" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.504365 4900 scope.go:117] "RemoveContainer" containerID="3cf53e8905946af7fd7264c27f6202978992cbf0dcfbc7eac50b1ce426cdfbc5" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.511523 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerStarted","Data":"986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956"} Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.554440 4900 scope.go:117] "RemoveContainer" containerID="3a84864f7e482b6a687b22c1f96dd8e49262950c68e7377d7fa998effcacf94d" Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.556430 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-np784"] Jan 27 12:55:10 crc kubenswrapper[4900]: I0127 12:55:10.569039 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-np784"] Jan 27 12:55:11 crc kubenswrapper[4900]: I0127 12:55:11.551850 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerStarted","Data":"75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef"} Jan 27 12:55:11 crc kubenswrapper[4900]: I0127 12:55:11.552223 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerStarted","Data":"10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d"} Jan 27 12:55:12 crc kubenswrapper[4900]: I0127 12:55:12.499757 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" path="/var/lib/kubelet/pods/dc6fa3fb-0ecf-4b99-a61c-6c448856afbc/volumes" Jan 27 12:55:12 crc kubenswrapper[4900]: I0127 12:55:12.573686 4900 generic.go:334] "Generic (PLEG): container finished" podID="5730e44a-3c94-46ba-8fd9-c659adcbfa31" containerID="b878424c8310cb03da552e59b089977e9f157b5a3f749c70249626fc40aa3655" exitCode=0 Jan 27 12:55:12 crc kubenswrapper[4900]: I0127 12:55:12.573743 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4rr45" event={"ID":"5730e44a-3c94-46ba-8fd9-c659adcbfa31","Type":"ContainerDied","Data":"b878424c8310cb03da552e59b089977e9f157b5a3f749c70249626fc40aa3655"} Jan 27 12:55:13 crc kubenswrapper[4900]: I0127 12:55:13.126144 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 27 12:55:13 crc kubenswrapper[4900]: I0127 12:55:13.127301 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 27 12:55:13 crc kubenswrapper[4900]: I0127 12:55:13.143499 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 27 12:55:13 crc kubenswrapper[4900]: I0127 12:55:13.594926 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerStarted","Data":"ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b"} Jan 27 12:55:13 crc kubenswrapper[4900]: I0127 12:55:13.597788 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 12:55:13 crc kubenswrapper[4900]: I0127 12:55:13.610762 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 27 12:55:13 crc kubenswrapper[4900]: I0127 12:55:13.637266 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.109200109 podStartE2EDuration="6.637240906s" podCreationTimestamp="2026-01-27 12:55:07 +0000 UTC" firstStartedPulling="2026-01-27 12:55:09.173479801 +0000 UTC m=+1736.410508011" lastFinishedPulling="2026-01-27 12:55:12.701520588 +0000 UTC m=+1739.938548808" observedRunningTime="2026-01-27 12:55:13.624190607 +0000 UTC m=+1740.861218817" watchObservedRunningTime="2026-01-27 12:55:13.637240906 +0000 UTC m=+1740.874269116" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.176000 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.321702 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2t2n\" (UniqueName: \"kubernetes.io/projected/5730e44a-3c94-46ba-8fd9-c659adcbfa31-kube-api-access-s2t2n\") pod \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.322505 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-scripts\") pod \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.322565 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-config-data\") pod \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.322905 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-combined-ca-bundle\") pod \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\" (UID: \"5730e44a-3c94-46ba-8fd9-c659adcbfa31\") " Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.329332 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-scripts" (OuterVolumeSpecName: "scripts") pod "5730e44a-3c94-46ba-8fd9-c659adcbfa31" (UID: "5730e44a-3c94-46ba-8fd9-c659adcbfa31"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.330042 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5730e44a-3c94-46ba-8fd9-c659adcbfa31-kube-api-access-s2t2n" (OuterVolumeSpecName: "kube-api-access-s2t2n") pod "5730e44a-3c94-46ba-8fd9-c659adcbfa31" (UID: "5730e44a-3c94-46ba-8fd9-c659adcbfa31"). InnerVolumeSpecName "kube-api-access-s2t2n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.369373 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-config-data" (OuterVolumeSpecName: "config-data") pod "5730e44a-3c94-46ba-8fd9-c659adcbfa31" (UID: "5730e44a-3c94-46ba-8fd9-c659adcbfa31"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.377037 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5730e44a-3c94-46ba-8fd9-c659adcbfa31" (UID: "5730e44a-3c94-46ba-8fd9-c659adcbfa31"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.426920 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2t2n\" (UniqueName: \"kubernetes.io/projected/5730e44a-3c94-46ba-8fd9-c659adcbfa31-kube-api-access-s2t2n\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.426959 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.426972 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.426990 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5730e44a-3c94-46ba-8fd9-c659adcbfa31-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.607910 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4rr45" event={"ID":"5730e44a-3c94-46ba-8fd9-c659adcbfa31","Type":"ContainerDied","Data":"07ddbdd240fe9824bb87f7d09a6e5a771abf22c7a943c7467ef989bdd7ae3131"} Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.607954 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07ddbdd240fe9824bb87f7d09a6e5a771abf22c7a943c7467ef989bdd7ae3131" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.609208 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4rr45" Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.874741 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.874994 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerName="nova-api-log" containerID="cri-o://29798a23552d2e524b0546cb95c61e05fd9010a103bb37b896e69699ef73bca2" gracePeriod=30 Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.875575 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerName="nova-api-api" containerID="cri-o://3080d71c656e2f1867e2574e4f20179a1ea8e26ef0d1bc7b000a71f0b935117e" gracePeriod=30 Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.917415 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.917769 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="385c54a0-5afc-4b5e-8357-567088537989" containerName="nova-scheduler-scheduler" containerID="cri-o://d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3" gracePeriod=30 Jan 27 12:55:14 crc kubenswrapper[4900]: I0127 12:55:14.977308 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:55:15 crc kubenswrapper[4900]: E0127 12:55:15.520033 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice/crio-conmon-3080d71c656e2f1867e2574e4f20179a1ea8e26ef0d1bc7b000a71f0b935117e.scope\": RecentStats: unable to find data in memory cache]" Jan 27 12:55:15 crc kubenswrapper[4900]: I0127 12:55:15.636578 4900 generic.go:334] "Generic (PLEG): container finished" podID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerID="3080d71c656e2f1867e2574e4f20179a1ea8e26ef0d1bc7b000a71f0b935117e" exitCode=0 Jan 27 12:55:15 crc kubenswrapper[4900]: I0127 12:55:15.636650 4900 generic.go:334] "Generic (PLEG): container finished" podID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerID="29798a23552d2e524b0546cb95c61e05fd9010a103bb37b896e69699ef73bca2" exitCode=143 Jan 27 12:55:15 crc kubenswrapper[4900]: I0127 12:55:15.638811 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70","Type":"ContainerDied","Data":"3080d71c656e2f1867e2574e4f20179a1ea8e26ef0d1bc7b000a71f0b935117e"} Jan 27 12:55:15 crc kubenswrapper[4900]: I0127 12:55:15.638891 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70","Type":"ContainerDied","Data":"29798a23552d2e524b0546cb95c61e05fd9010a103bb37b896e69699ef73bca2"} Jan 27 12:55:15 crc kubenswrapper[4900]: I0127 12:55:15.981413 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.182251 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-logs\") pod \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.182332 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-public-tls-certs\") pod \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.182512 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-combined-ca-bundle\") pod \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.182572 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-internal-tls-certs\") pod \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.182614 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-logs" (OuterVolumeSpecName: "logs") pod "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" (UID: "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.182664 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2j49p\" (UniqueName: \"kubernetes.io/projected/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-kube-api-access-2j49p\") pod \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.183477 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-config-data\") pod \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\" (UID: \"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70\") " Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.185728 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.205114 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-kube-api-access-2j49p" (OuterVolumeSpecName: "kube-api-access-2j49p") pod "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" (UID: "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70"). InnerVolumeSpecName "kube-api-access-2j49p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.216876 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-config-data" (OuterVolumeSpecName: "config-data") pod "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" (UID: "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.232209 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" (UID: "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.261439 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" (UID: "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.268405 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" (UID: "e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.288620 4900 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.290299 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.290328 4900 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.290342 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2j49p\" (UniqueName: \"kubernetes.io/projected/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-kube-api-access-2j49p\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.290360 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.652234 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70","Type":"ContainerDied","Data":"a40cf3efd661aa162db755fd7bc58ab4a0b1b15b1ec7971f6bd90f61e4595bca"} Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.652298 4900 scope.go:117] 
"RemoveContainer" containerID="3080d71c656e2f1867e2574e4f20179a1ea8e26ef0d1bc7b000a71f0b935117e" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.652264 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.652431 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-log" containerID="cri-o://2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c" gracePeriod=30 Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.652595 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-metadata" containerID="cri-o://5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5" gracePeriod=30 Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.687877 4900 scope.go:117] "RemoveContainer" containerID="29798a23552d2e524b0546cb95c61e05fd9010a103bb37b896e69699ef73bca2" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.698158 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.725924 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.747935 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:16 crc kubenswrapper[4900]: E0127 12:55:16.748674 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5730e44a-3c94-46ba-8fd9-c659adcbfa31" containerName="nova-manage" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.748700 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="5730e44a-3c94-46ba-8fd9-c659adcbfa31" containerName="nova-manage" Jan 27 12:55:16 crc kubenswrapper[4900]: E0127 12:55:16.748709 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerName="nova-api-api" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.748716 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerName="nova-api-api" Jan 27 12:55:16 crc kubenswrapper[4900]: E0127 12:55:16.748740 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" containerName="init" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.748747 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" containerName="init" Jan 27 12:55:16 crc kubenswrapper[4900]: E0127 12:55:16.748761 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerName="nova-api-log" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.748767 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerName="nova-api-log" Jan 27 12:55:16 crc kubenswrapper[4900]: E0127 12:55:16.748780 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" containerName="dnsmasq-dns" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.748787 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" containerName="dnsmasq-dns" Jan 27 12:55:16 crc 
kubenswrapper[4900]: I0127 12:55:16.749104 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc6fa3fb-0ecf-4b99-a61c-6c448856afbc" containerName="dnsmasq-dns" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.749127 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="5730e44a-3c94-46ba-8fd9-c659adcbfa31" containerName="nova-manage" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.749138 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerName="nova-api-log" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.749148 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" containerName="nova-api-api" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.750585 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.753164 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.753343 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.755443 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.775443 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.809877 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.810285 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-public-tls-certs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.810325 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rxtr\" (UniqueName: \"kubernetes.io/projected/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-kube-api-access-7rxtr\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.810379 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-logs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.810441 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.810492 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-config-data\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.913892 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-public-tls-certs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.913952 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rxtr\" (UniqueName: \"kubernetes.io/projected/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-kube-api-access-7rxtr\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.914044 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-logs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.914169 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.914268 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-config-data\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.914366 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.914829 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-logs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.923796 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-public-tls-certs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.923861 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-config-data\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.924587 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.930424 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:16 crc kubenswrapper[4900]: I0127 12:55:16.940850 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rxtr\" (UniqueName: \"kubernetes.io/projected/5c2eb9fa-5015-4196-a6f3-a01d848d6c67-kube-api-access-7rxtr\") pod \"nova-api-0\" (UID: \"5c2eb9fa-5015-4196-a6f3-a01d848d6c67\") " pod="openstack/nova-api-0" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.171048 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.481929 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:55:17 crc kubenswrapper[4900]: E0127 12:55:17.482919 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:55:17 crc kubenswrapper[4900]: E0127 12:55:17.520009 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3 is running failed: container process not found" containerID="d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 12:55:17 crc kubenswrapper[4900]: E0127 12:55:17.520481 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3 is running failed: container process not found" containerID="d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 12:55:17 crc kubenswrapper[4900]: E0127 12:55:17.520716 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3 is running failed: container process not found" containerID="d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 27 12:55:17 crc kubenswrapper[4900]: E0127 12:55:17.520739 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="385c54a0-5afc-4b5e-8357-567088537989" 
containerName="nova-scheduler-scheduler" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.592890 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.679708 4900 generic.go:334] "Generic (PLEG): container finished" podID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerID="2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c" exitCode=143 Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.679792 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b6fcc6fd-95bc-4e8a-896f-4a60268a9055","Type":"ContainerDied","Data":"2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c"} Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.684685 4900 generic.go:334] "Generic (PLEG): container finished" podID="385c54a0-5afc-4b5e-8357-567088537989" containerID="d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3" exitCode=0 Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.684743 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"385c54a0-5afc-4b5e-8357-567088537989","Type":"ContainerDied","Data":"d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3"} Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.684780 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"385c54a0-5afc-4b5e-8357-567088537989","Type":"ContainerDied","Data":"dbd297cc07f174ecb2343a0c7d99c1d65d601df8d3970e76116912bbbe1a91b3"} Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.684799 4900 scope.go:117] "RemoveContainer" containerID="d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.684944 4900 util.go:48] "No ready sandbox for pod can be found. 
Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.718703 4900 scope.go:117] "RemoveContainer" containerID="d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3"
Jan 27 12:55:17 crc kubenswrapper[4900]: E0127 12:55:17.719489 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3\": container with ID starting with d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3 not found: ID does not exist" containerID="d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3"
Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.719549 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3"} err="failed to get container status \"d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3\": rpc error: code = NotFound desc = could not find container \"d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3\": container with ID starting with d10bb6e4fe0c1fdd9062f00c0b79708947af06e9256843063f67ef3e1925b4b3 not found: ID does not exist"
Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.741376 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-config-data\") pod \"385c54a0-5afc-4b5e-8357-567088537989\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") "
Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.741578 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4x844\" (UniqueName: \"kubernetes.io/projected/385c54a0-5afc-4b5e-8357-567088537989-kube-api-access-4x844\") pod \"385c54a0-5afc-4b5e-8357-567088537989\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") "
Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.741661 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-combined-ca-bundle\") pod \"385c54a0-5afc-4b5e-8357-567088537989\" (UID: \"385c54a0-5afc-4b5e-8357-567088537989\") "
Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.763248 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/385c54a0-5afc-4b5e-8357-567088537989-kube-api-access-4x844" (OuterVolumeSpecName: "kube-api-access-4x844") pod "385c54a0-5afc-4b5e-8357-567088537989" (UID: "385c54a0-5afc-4b5e-8357-567088537989"). InnerVolumeSpecName "kube-api-access-4x844". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.792404 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-config-data" (OuterVolumeSpecName: "config-data") pod "385c54a0-5afc-4b5e-8357-567088537989" (UID: "385c54a0-5afc-4b5e-8357-567088537989"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.807322 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "385c54a0-5afc-4b5e-8357-567088537989" (UID: "385c54a0-5afc-4b5e-8357-567088537989"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.869104 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.869984 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4x844\" (UniqueName: \"kubernetes.io/projected/385c54a0-5afc-4b5e-8357-567088537989-kube-api-access-4x844\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.870097 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/385c54a0-5afc-4b5e-8357-567088537989-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:17 crc kubenswrapper[4900]: I0127 12:55:17.872412 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.058012 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.082114 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.095643 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:55:18 crc kubenswrapper[4900]: E0127 12:55:18.096262 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="385c54a0-5afc-4b5e-8357-567088537989" containerName="nova-scheduler-scheduler" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.096284 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="385c54a0-5afc-4b5e-8357-567088537989" containerName="nova-scheduler-scheduler" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.096513 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="385c54a0-5afc-4b5e-8357-567088537989" containerName="nova-scheduler-scheduler" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.097460 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.102825 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.139936 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.297179 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8782c194-2052-418f-9765-b895062a6fab-config-data\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.297566 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8782c194-2052-418f-9765-b895062a6fab-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.297833 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sj7g\" (UniqueName: \"kubernetes.io/projected/8782c194-2052-418f-9765-b895062a6fab-kube-api-access-6sj7g\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.426066 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sj7g\" (UniqueName: \"kubernetes.io/projected/8782c194-2052-418f-9765-b895062a6fab-kube-api-access-6sj7g\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.426282 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8782c194-2052-418f-9765-b895062a6fab-config-data\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.426416 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8782c194-2052-418f-9765-b895062a6fab-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.433390 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8782c194-2052-418f-9765-b895062a6fab-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.437215 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8782c194-2052-418f-9765-b895062a6fab-config-data\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.450365 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sj7g\" (UniqueName: 
\"kubernetes.io/projected/8782c194-2052-418f-9765-b895062a6fab-kube-api-access-6sj7g\") pod \"nova-scheduler-0\" (UID: \"8782c194-2052-418f-9765-b895062a6fab\") " pod="openstack/nova-scheduler-0" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.503142 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="385c54a0-5afc-4b5e-8357-567088537989" path="/var/lib/kubelet/pods/385c54a0-5afc-4b5e-8357-567088537989/volumes" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.503770 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70" path="/var/lib/kubelet/pods/e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70/volumes" Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.714600 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5c2eb9fa-5015-4196-a6f3-a01d848d6c67","Type":"ContainerStarted","Data":"40eafd755d9f7e095dd5860772ac1174358a65fa1d1e5c02464ebd80f7ea8fe9"} Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.715154 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5c2eb9fa-5015-4196-a6f3-a01d848d6c67","Type":"ContainerStarted","Data":"3e1cdc9cd45f900740bcc6a2f067c315f8ebd7231caee6142a67ac2276beccd5"} Jan 27 12:55:18 crc kubenswrapper[4900]: I0127 12:55:18.720485 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 27 12:55:19 crc kubenswrapper[4900]: E0127 12:55:19.137434 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]" Jan 27 12:55:19 crc kubenswrapper[4900]: I0127 12:55:19.308318 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 27 12:55:19 crc kubenswrapper[4900]: I0127 12:55:19.732204 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8782c194-2052-418f-9765-b895062a6fab","Type":"ContainerStarted","Data":"85f498ca27c5ef5624333f718e82d320ae7098e73ee31e5613bbc58652d16e33"} Jan 27 12:55:19 crc kubenswrapper[4900]: I0127 12:55:19.732508 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8782c194-2052-418f-9765-b895062a6fab","Type":"ContainerStarted","Data":"234ab914913e9f42f05c1d92d69a14a00fb12791c4f5cf0861107d5672b92d9f"} Jan 27 12:55:19 crc kubenswrapper[4900]: I0127 12:55:19.735422 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5c2eb9fa-5015-4196-a6f3-a01d848d6c67","Type":"ContainerStarted","Data":"d19409c5630f75407792501754996539d43d401f6b5f641e479e54bf3900c168"} Jan 27 12:55:19 crc kubenswrapper[4900]: I0127 12:55:19.753870 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.7538346329999999 podStartE2EDuration="1.753834633s" podCreationTimestamp="2026-01-27 12:55:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:55:19.750169816 +0000 UTC m=+1746.987198026" watchObservedRunningTime="2026-01-27 12:55:19.753834633 +0000 UTC m=+1746.990862833" Jan 27 12:55:19 crc kubenswrapper[4900]: I0127 12:55:19.807664 4900 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/nova-api-0" podStartSLOduration=3.807638544 podStartE2EDuration="3.807638544s" podCreationTimestamp="2026-01-27 12:55:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:55:19.772737821 +0000 UTC m=+1747.009766041" watchObservedRunningTime="2026-01-27 12:55:19.807638544 +0000 UTC m=+1747.044666754" Jan 27 12:55:19 crc kubenswrapper[4900]: I0127 12:55:19.874316 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.0:8775/\": read tcp 10.217.0.2:38344->10.217.1.0:8775: read: connection reset by peer" Jan 27 12:55:19 crc kubenswrapper[4900]: I0127 12:55:19.874316 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.0:8775/\": read tcp 10.217.0.2:38340->10.217.1.0:8775: read: connection reset by peer" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.474900 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.592284 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-logs\") pod \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.592702 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph97t\" (UniqueName: \"kubernetes.io/projected/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-kube-api-access-ph97t\") pod \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.592849 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-config-data\") pod \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.592972 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-combined-ca-bundle\") pod \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.593010 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-nova-metadata-tls-certs\") pod \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\" (UID: \"b6fcc6fd-95bc-4e8a-896f-4a60268a9055\") " Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.601571 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-logs" (OuterVolumeSpecName: "logs") pod "b6fcc6fd-95bc-4e8a-896f-4a60268a9055" (UID: "b6fcc6fd-95bc-4e8a-896f-4a60268a9055"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.619984 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-kube-api-access-ph97t" (OuterVolumeSpecName: "kube-api-access-ph97t") pod "b6fcc6fd-95bc-4e8a-896f-4a60268a9055" (UID: "b6fcc6fd-95bc-4e8a-896f-4a60268a9055"). InnerVolumeSpecName "kube-api-access-ph97t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.666534 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-config-data" (OuterVolumeSpecName: "config-data") pod "b6fcc6fd-95bc-4e8a-896f-4a60268a9055" (UID: "b6fcc6fd-95bc-4e8a-896f-4a60268a9055"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.683589 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6fcc6fd-95bc-4e8a-896f-4a60268a9055" (UID: "b6fcc6fd-95bc-4e8a-896f-4a60268a9055"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.690294 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "b6fcc6fd-95bc-4e8a-896f-4a60268a9055" (UID: "b6fcc6fd-95bc-4e8a-896f-4a60268a9055"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.702479 4900 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-logs\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.702534 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ph97t\" (UniqueName: \"kubernetes.io/projected/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-kube-api-access-ph97t\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.702551 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.702567 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.702584 4900 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6fcc6fd-95bc-4e8a-896f-4a60268a9055-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.756956 4900 generic.go:334] "Generic (PLEG): container finished" podID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerID="5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5" exitCode=0 Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.757229 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.757256 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b6fcc6fd-95bc-4e8a-896f-4a60268a9055","Type":"ContainerDied","Data":"5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5"} Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.758429 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b6fcc6fd-95bc-4e8a-896f-4a60268a9055","Type":"ContainerDied","Data":"b7374ed198199be6dfefea4afbb39b9cd31ba1231562237b612bead9a34cf5fa"} Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.758516 4900 scope.go:117] "RemoveContainer" containerID="5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.817626 4900 scope.go:117] "RemoveContainer" containerID="2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.835136 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.868266 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.873154 4900 scope.go:117] "RemoveContainer" containerID="5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5" Jan 27 12:55:20 crc kubenswrapper[4900]: E0127 12:55:20.874462 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5\": container with ID starting 
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.874501 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5"} err="failed to get container status \"5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5\": rpc error: code = NotFound desc = could not find container \"5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5\": container with ID starting with 5eee246224bbda1ab6e78741a7940dbff1cfb5879cf971fae62208f7560f24c5 not found: ID does not exist"
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.874527 4900 scope.go:117] "RemoveContainer" containerID="2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c"
Jan 27 12:55:20 crc kubenswrapper[4900]: E0127 12:55:20.874955 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c\": container with ID starting with 2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c not found: ID does not exist" containerID="2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c"
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.874992 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c"} err="failed to get container status \"2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c\": rpc error: code = NotFound desc = could not find container \"2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c\": container with ID starting with 2df98d116b0efe4afd1bc527418c7be73b3a2e18acfa2c26778b5edb919c172c not found: ID does not exist"
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.883649 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 27 12:55:20 crc kubenswrapper[4900]: E0127 12:55:20.884641 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-metadata"
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.884741 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-metadata"
Jan 27 12:55:20 crc kubenswrapper[4900]: E0127 12:55:20.884885 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-log"
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.884968 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-log"
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.885324 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-log"
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.885411 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" containerName="nova-metadata-metadata"
Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.887273 4900 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-metadata-0" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.894726 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.895010 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 27 12:55:20 crc kubenswrapper[4900]: I0127 12:55:20.898359 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.009375 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71a49337-f4cf-48fb-936b-3869052594cd-logs\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.010735 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-config-data\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.011260 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npp5v\" (UniqueName: \"kubernetes.io/projected/71a49337-f4cf-48fb-936b-3869052594cd-kube-api-access-npp5v\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.011787 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.011947 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.113916 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71a49337-f4cf-48fb-936b-3869052594cd-logs\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.114123 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-config-data\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.114179 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npp5v\" (UniqueName: \"kubernetes.io/projected/71a49337-f4cf-48fb-936b-3869052594cd-kube-api-access-npp5v\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 
12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.114348 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.114406 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.114426 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71a49337-f4cf-48fb-936b-3869052594cd-logs\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.118399 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.118406 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-config-data\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.119630 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/71a49337-f4cf-48fb-936b-3869052594cd-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.134202 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npp5v\" (UniqueName: \"kubernetes.io/projected/71a49337-f4cf-48fb-936b-3869052594cd-kube-api-access-npp5v\") pod \"nova-metadata-0\" (UID: \"71a49337-f4cf-48fb-936b-3869052594cd\") " pod="openstack/nova-metadata-0" Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.227276 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Jan 27 12:55:21 crc kubenswrapper[4900]: I0127 12:55:21.798366 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 27 12:55:21 crc kubenswrapper[4900]: W0127 12:55:21.799117 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71a49337_f4cf_48fb_936b_3869052594cd.slice/crio-2b49a3875e7f6bc758f5e14909a595c0398600b9ac3d86d84b00da55519e869d WatchSource:0}: Error finding container 2b49a3875e7f6bc758f5e14909a595c0398600b9ac3d86d84b00da55519e869d: Status 404 returned error can't find the container with id 2b49a3875e7f6bc758f5e14909a595c0398600b9ac3d86d84b00da55519e869d
Jan 27 12:55:22 crc kubenswrapper[4900]: I0127 12:55:22.500447 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6fcc6fd-95bc-4e8a-896f-4a60268a9055" path="/var/lib/kubelet/pods/b6fcc6fd-95bc-4e8a-896f-4a60268a9055/volumes"
Jan 27 12:55:22 crc kubenswrapper[4900]: I0127 12:55:22.532625 4900 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod6ca41e27-112f-461c-9556-768eea0cbdf6"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod6ca41e27-112f-461c-9556-768eea0cbdf6] : Timed out while waiting for systemd to remove kubepods-besteffort-pod6ca41e27_112f_461c_9556_768eea0cbdf6.slice"
Jan 27 12:55:22 crc kubenswrapper[4900]: I0127 12:55:22.806339 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"71a49337-f4cf-48fb-936b-3869052594cd","Type":"ContainerStarted","Data":"bd8783e99bfd34bd6f0ef354ae7c7614b1f69aac6192741317bf268ff28891c5"}
Jan 27 12:55:22 crc kubenswrapper[4900]: I0127 12:55:22.806408 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"71a49337-f4cf-48fb-936b-3869052594cd","Type":"ContainerStarted","Data":"774276dc057f2f349683941b93c8aed313753fc71845527c0063439bd69e2a90"}
Jan 27 12:55:22 crc kubenswrapper[4900]: I0127 12:55:22.806424 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"71a49337-f4cf-48fb-936b-3869052594cd","Type":"ContainerStarted","Data":"2b49a3875e7f6bc758f5e14909a595c0398600b9ac3d86d84b00da55519e869d"}
Jan 27 12:55:22 crc kubenswrapper[4900]: I0127 12:55:22.837043 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.837016423 podStartE2EDuration="2.837016423s" podCreationTimestamp="2026-01-27 12:55:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:55:22.827766284 +0000 UTC m=+1750.064794494" watchObservedRunningTime="2026-01-27 12:55:22.837016423 +0000 UTC m=+1750.074044633"
Jan 27 12:55:23 crc kubenswrapper[4900]: I0127 12:55:23.720911 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 27 12:55:26 crc kubenswrapper[4900]: I0127 12:55:26.229241 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 27 12:55:26 crc kubenswrapper[4900]: I0127 12:55:26.229835 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 27 12:55:27 crc kubenswrapper[4900]: I0127 12:55:27.171401 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
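
[Editor's note] From here the recreated pods work through their startup probes: "unhealthy" results and "Probe failed" entries with a client timeout while nova-api and nova-metadata warm up, flipping to status="started" and "ready" between 12:55:28 and 12:55:42. The failure output is exactly what a timed-out Go HTTP client produces; a probe-style reproduction (a sketch using the endpoint from the log, not the kubelet's prober; InsecureSkipVerify mirrors how HTTPS probes skip cert verification):

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // Probe-style GET against the nova-api endpoint from the log.
        client := &http.Client{
            Timeout: 1 * time.Second,
            Transport: &http.Transport{
                TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
            },
        }
        resp, err := client.Get("https://10.217.1.8:8774/")
        if err != nil {
            // While the server is still warming up this prints:
            // Get "https://10.217.1.8:8774/": net/http: request canceled
            // (Client.Timeout exceeded while awaiting headers)
            fmt.Println("Probe failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("Probe succeeded:", resp.Status)
    }
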
pod="openstack/nova-api-0" Jan 27 12:55:27 crc kubenswrapper[4900]: I0127 12:55:27.171496 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 27 12:55:28 crc kubenswrapper[4900]: I0127 12:55:28.191483 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5c2eb9fa-5015-4196-a6f3-a01d848d6c67" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.8:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 12:55:28 crc kubenswrapper[4900]: I0127 12:55:28.191558 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5c2eb9fa-5015-4196-a6f3-a01d848d6c67" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.8:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 12:55:28 crc kubenswrapper[4900]: I0127 12:55:28.720829 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 27 12:55:28 crc kubenswrapper[4900]: I0127 12:55:28.753147 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 27 12:55:28 crc kubenswrapper[4900]: I0127 12:55:28.937270 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 27 12:55:29 crc kubenswrapper[4900]: E0127 12:55:29.538296 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]" Jan 27 12:55:30 crc kubenswrapper[4900]: E0127 12:55:30.260892 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]" Jan 27 12:55:30 crc kubenswrapper[4900]: I0127 12:55:30.483748 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:55:30 crc kubenswrapper[4900]: E0127 12:55:30.484449 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:55:31 crc kubenswrapper[4900]: I0127 12:55:31.228995 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 27 12:55:31 crc kubenswrapper[4900]: I0127 12:55:31.229381 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 27 12:55:32 crc kubenswrapper[4900]: I0127 12:55:32.241342 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="71a49337-f4cf-48fb-936b-3869052594cd" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.10:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 12:55:32 crc kubenswrapper[4900]: I0127 12:55:32.241389 
Jan 27 12:55:37 crc kubenswrapper[4900]: I0127 12:55:37.179024 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 27 12:55:37 crc kubenswrapper[4900]: I0127 12:55:37.179818 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 27 12:55:37 crc kubenswrapper[4900]: I0127 12:55:37.181846 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 27 12:55:37 crc kubenswrapper[4900]: I0127 12:55:37.187868 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 27 12:55:38 crc kubenswrapper[4900]: I0127 12:55:38.002242 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 27 12:55:38 crc kubenswrapper[4900]: I0127 12:55:38.009915 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 27 12:55:38 crc kubenswrapper[4900]: I0127 12:55:38.125674 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 27 12:55:39 crc kubenswrapper[4900]: E0127 12:55:39.857368 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]"
Jan 27 12:55:41 crc kubenswrapper[4900]: I0127 12:55:41.233931 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 27 12:55:41 crc kubenswrapper[4900]: I0127 12:55:41.240560 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 27 12:55:41 crc kubenswrapper[4900]: I0127 12:55:41.241123 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 27 12:55:41 crc kubenswrapper[4900]: I0127 12:55:41.483742 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876"
Jan 27 12:55:41 crc kubenswrapper[4900]: E0127 12:55:41.484215 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 12:55:42 crc kubenswrapper[4900]: I0127 12:55:42.173449 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 27 12:55:45 crc kubenswrapper[4900]: E0127 12:55:45.597900 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]"
Jan 27 12:55:48 crc kubenswrapper[4900]: E0127 12:55:48.143341 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]"
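
[Editor's note] Every one of the recurring cadvisor "Partial failure" errors names the cgroup slice of the nova-api-0 pod deleted at 12:55:16 (UID e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70): stats entries for a removed pod can linger in cadvisor's cache for a while after deletion. The slice name is derived from the pod UID by swapping dashes for underscores under the QoS-class prefix; the mapping, as a one-function sketch (sliceForPodUID is a hypothetical helper):

    package main

    import (
        "fmt"
        "strings"
    )

    // sliceForPodUID rebuilds the systemd slice name seen in the cadvisor
    // errors: dashes in the pod UID become underscores, prefixed with the
    // pod's QoS class (besteffort here).
    func sliceForPodUID(uid string) string {
        return "kubepods-besteffort-pod" + strings.ReplaceAll(uid, "-", "_") + ".slice"
    }

    func main() {
        fmt.Println(sliceForPodUID("e6b34fa5-f4ff-4e7e-bdd5-9347ed9dda70"))
        // Output: kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice
    }
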
Jan 27 12:55:48 crc kubenswrapper[4900]: E0127 12:55:48.144748 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]"
Jan 27 12:55:50 crc kubenswrapper[4900]: E0127 12:55:50.216044 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]"
Jan 27 12:55:52 crc kubenswrapper[4900]: I0127 12:55:52.989803 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-nwqdh"]
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.003910 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-nwqdh"]
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.081932 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-z6j69"]
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.084296 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-z6j69"
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.131162 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-z6j69"]
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.260629 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzsls\" (UniqueName: \"kubernetes.io/projected/01b46bc7-748f-4872-abb3-a7faef291c0b-kube-api-access-kzsls\") pod \"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69"
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.261073 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-config-data\") pod \"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69"
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.261360 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-combined-ca-bundle\") pod \"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69"
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.364501 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzsls\" (UniqueName: \"kubernetes.io/projected/01b46bc7-748f-4872-abb3-a7faef291c0b-kube-api-access-kzsls\") pod \"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69"
Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.364656 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-config-data\") pod
\"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69" Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.364749 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-combined-ca-bundle\") pod \"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69" Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.375287 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-config-data\") pod \"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69" Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.392204 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-combined-ca-bundle\") pod \"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69" Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.411768 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzsls\" (UniqueName: \"kubernetes.io/projected/01b46bc7-748f-4872-abb3-a7faef291c0b-kube-api-access-kzsls\") pod \"heat-db-sync-z6j69\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " pod="openstack/heat-db-sync-z6j69" Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.419193 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-z6j69" Jan 27 12:55:53 crc kubenswrapper[4900]: I0127 12:55:53.488523 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:55:53 crc kubenswrapper[4900]: E0127 12:55:53.489348 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:55:54 crc kubenswrapper[4900]: I0127 12:55:54.551952 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5375696-4614-47d4-a8aa-2a98bdd0bd17" path="/var/lib/kubelet/pods/b5375696-4614-47d4-a8aa-2a98bdd0bd17/volumes" Jan 27 12:55:54 crc kubenswrapper[4900]: I0127 12:55:54.594947 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-z6j69"] Jan 27 12:55:55 crc kubenswrapper[4900]: I0127 12:55:55.163516 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 27 12:55:55 crc kubenswrapper[4900]: I0127 12:55:55.342302 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-z6j69" event={"ID":"01b46bc7-748f-4872-abb3-a7faef291c0b","Type":"ContainerStarted","Data":"37560e99d5aa0b9fa20b8cc289e0a552c318fad7a762f910ed0640deb29ea643"} Jan 27 12:55:58 crc kubenswrapper[4900]: I0127 12:55:58.196007 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 27 12:56:00 crc kubenswrapper[4900]: E0127 12:56:00.507671 4900 cadvisor_stats_provider.go:516] "Partial 
failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]" Jan 27 12:56:00 crc kubenswrapper[4900]: E0127 12:56:00.508416 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]" Jan 27 12:56:01 crc kubenswrapper[4900]: I0127 12:56:01.308431 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:56:01 crc kubenswrapper[4900]: I0127 12:56:01.308988 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="ceilometer-central-agent" containerID="cri-o://986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956" gracePeriod=30 Jan 27 12:56:01 crc kubenswrapper[4900]: I0127 12:56:01.309139 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="proxy-httpd" containerID="cri-o://ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b" gracePeriod=30 Jan 27 12:56:01 crc kubenswrapper[4900]: I0127 12:56:01.309179 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="sg-core" containerID="cri-o://75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef" gracePeriod=30 Jan 27 12:56:01 crc kubenswrapper[4900]: I0127 12:56:01.309208 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="ceilometer-notification-agent" containerID="cri-o://10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d" gracePeriod=30 Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.103691 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4b3481e-e421-4a42-aa33-8327e969136a" containerID="ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b" exitCode=0 Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.103940 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4b3481e-e421-4a42-aa33-8327e969136a" containerID="75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef" exitCode=2 Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.103951 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4b3481e-e421-4a42-aa33-8327e969136a" containerID="986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956" exitCode=0 Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.104004 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerDied","Data":"ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b"} Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.104051 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerDied","Data":"75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef"} Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.104064 4900 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerDied","Data":"986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956"} Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.718843 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-2" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="rabbitmq" containerID="cri-o://d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd" gracePeriod=604793 Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.752005 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.956639 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-combined-ca-bundle\") pod \"b4b3481e-e421-4a42-aa33-8327e969136a\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.956821 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-config-data\") pod \"b4b3481e-e421-4a42-aa33-8327e969136a\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.957849 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-ceilometer-tls-certs\") pod \"b4b3481e-e421-4a42-aa33-8327e969136a\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.958014 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-sg-core-conf-yaml\") pod \"b4b3481e-e421-4a42-aa33-8327e969136a\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.958047 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwflj\" (UniqueName: \"kubernetes.io/projected/b4b3481e-e421-4a42-aa33-8327e969136a-kube-api-access-vwflj\") pod \"b4b3481e-e421-4a42-aa33-8327e969136a\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.958152 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-log-httpd\") pod \"b4b3481e-e421-4a42-aa33-8327e969136a\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.958349 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-scripts\") pod \"b4b3481e-e421-4a42-aa33-8327e969136a\" (UID: \"b4b3481e-e421-4a42-aa33-8327e969136a\") " Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.958440 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-run-httpd\") pod \"b4b3481e-e421-4a42-aa33-8327e969136a\" (UID: 
\"b4b3481e-e421-4a42-aa33-8327e969136a\") " Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.960175 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b4b3481e-e421-4a42-aa33-8327e969136a" (UID: "b4b3481e-e421-4a42-aa33-8327e969136a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.960527 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b4b3481e-e421-4a42-aa33-8327e969136a" (UID: "b4b3481e-e421-4a42-aa33-8327e969136a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.970083 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4b3481e-e421-4a42-aa33-8327e969136a-kube-api-access-vwflj" (OuterVolumeSpecName: "kube-api-access-vwflj") pod "b4b3481e-e421-4a42-aa33-8327e969136a" (UID: "b4b3481e-e421-4a42-aa33-8327e969136a"). InnerVolumeSpecName "kube-api-access-vwflj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:56:02 crc kubenswrapper[4900]: I0127 12:56:02.973169 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-scripts" (OuterVolumeSpecName: "scripts") pod "b4b3481e-e421-4a42-aa33-8327e969136a" (UID: "b4b3481e-e421-4a42-aa33-8327e969136a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.061741 4900 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.061776 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwflj\" (UniqueName: \"kubernetes.io/projected/b4b3481e-e421-4a42-aa33-8327e969136a-kube-api-access-vwflj\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.061789 4900 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4b3481e-e421-4a42-aa33-8327e969136a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.061797 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.166255 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b4b3481e-e421-4a42-aa33-8327e969136a" (UID: "b4b3481e-e421-4a42-aa33-8327e969136a"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.166535 4900 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.187252 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "b4b3481e-e421-4a42-aa33-8327e969136a" (UID: "b4b3481e-e421-4a42-aa33-8327e969136a"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.194727 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4b3481e-e421-4a42-aa33-8327e969136a" containerID="10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d" exitCode=0 Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.194847 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerDied","Data":"10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d"} Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.194894 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4b3481e-e421-4a42-aa33-8327e969136a","Type":"ContainerDied","Data":"3ec12ad556857fa65e6e86aaa472059dc6f02b6388d54a2c9e8cfe67effb81fe"} Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.194917 4900 scope.go:117] "RemoveContainer" containerID="ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.195165 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.209241 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4b3481e-e421-4a42-aa33-8327e969136a" (UID: "b4b3481e-e421-4a42-aa33-8327e969136a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.269354 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.269392 4900 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.356147 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-config-data" (OuterVolumeSpecName: "config-data") pod "b4b3481e-e421-4a42-aa33-8327e969136a" (UID: "b4b3481e-e421-4a42-aa33-8327e969136a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.362889 4900 scope.go:117] "RemoveContainer" containerID="75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.372931 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4b3481e-e421-4a42-aa33-8327e969136a-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.437298 4900 scope.go:117] "RemoveContainer" containerID="10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.479891 4900 scope.go:117] "RemoveContainer" containerID="986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.547743 4900 scope.go:117] "RemoveContainer" containerID="ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.548766 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:56:03 crc kubenswrapper[4900]: E0127 12:56:03.549450 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b\": container with ID starting with ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b not found: ID does not exist" containerID="ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.549505 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b"} err="failed to get container status \"ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b\": rpc error: code = NotFound desc = could not find container \"ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b\": container with ID starting with ff3ac518790cf8fbd8982de00a235d12d7efbf4ee948c875398639254a43e04b not found: ID does not exist" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.549543 4900 scope.go:117] "RemoveContainer" containerID="75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef" Jan 27 12:56:03 crc kubenswrapper[4900]: E0127 12:56:03.549899 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef\": container with ID starting with 75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef not found: ID does not exist" containerID="75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.550003 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef"} err="failed to get container status \"75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef\": rpc error: code = NotFound desc = could not find container \"75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef\": container with ID starting with 75e25f1e3674dd01cabc41f4a3ee2dd01dee96ea3a87dba6384183400cb73aef not found: ID does not exist" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.550124 4900 scope.go:117] 
"RemoveContainer" containerID="10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d" Jan 27 12:56:03 crc kubenswrapper[4900]: E0127 12:56:03.550460 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d\": container with ID starting with 10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d not found: ID does not exist" containerID="10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.550492 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d"} err="failed to get container status \"10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d\": rpc error: code = NotFound desc = could not find container \"10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d\": container with ID starting with 10901faaf20beb816b152adc3f9842c06d6ad3703b05633fee9cd9087d2cd11d not found: ID does not exist" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.550506 4900 scope.go:117] "RemoveContainer" containerID="986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956" Jan 27 12:56:03 crc kubenswrapper[4900]: E0127 12:56:03.551462 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956\": container with ID starting with 986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956 not found: ID does not exist" containerID="986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.551483 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956"} err="failed to get container status \"986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956\": rpc error: code = NotFound desc = could not find container \"986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956\": container with ID starting with 986a18ba90d6b0ae2c07214f57514e5accfd9b71cc3a1c965aaed030fd5c5956 not found: ID does not exist" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.614643 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.696677 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:56:03 crc kubenswrapper[4900]: E0127 12:56:03.697812 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="ceilometer-central-agent" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.697848 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="ceilometer-central-agent" Jan 27 12:56:03 crc kubenswrapper[4900]: E0127 12:56:03.697881 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="sg-core" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.697892 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="sg-core" Jan 27 12:56:03 crc kubenswrapper[4900]: E0127 12:56:03.697920 4900 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="ceilometer-notification-agent" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.697934 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="ceilometer-notification-agent" Jan 27 12:56:03 crc kubenswrapper[4900]: E0127 12:56:03.697967 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="proxy-httpd" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.697977 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="proxy-httpd" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.698410 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="sg-core" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.699335 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="ceilometer-notification-agent" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.699441 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="ceilometer-central-agent" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.699509 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" containerName="proxy-httpd" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.717591 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.726201 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.726285 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.726401 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.734442 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.838815 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/780131af-30a7-406a-8ae9-b9a3a0826d1e-run-httpd\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.838888 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/780131af-30a7-406a-8ae9-b9a3a0826d1e-log-httpd\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.838939 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-scripts\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.838974 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq4rz\" (UniqueName: \"kubernetes.io/projected/780131af-30a7-406a-8ae9-b9a3a0826d1e-kube-api-access-fq4rz\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.839221 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.839260 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-config-data\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.839296 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.839322 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.945430 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/780131af-30a7-406a-8ae9-b9a3a0826d1e-run-httpd\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.945568 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/780131af-30a7-406a-8ae9-b9a3a0826d1e-log-httpd\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.945679 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-scripts\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.945733 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq4rz\" (UniqueName: \"kubernetes.io/projected/780131af-30a7-406a-8ae9-b9a3a0826d1e-kube-api-access-fq4rz\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.945998 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.947200 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-config-data\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.947283 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.947393 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.950086 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/780131af-30a7-406a-8ae9-b9a3a0826d1e-run-httpd\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.950816 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/780131af-30a7-406a-8ae9-b9a3a0826d1e-log-httpd\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.956151 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.961533 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-config-data\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.963430 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.969304 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.981484 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/780131af-30a7-406a-8ae9-b9a3a0826d1e-scripts\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " 
pod="openstack/ceilometer-0" Jan 27 12:56:03 crc kubenswrapper[4900]: I0127 12:56:03.991285 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq4rz\" (UniqueName: \"kubernetes.io/projected/780131af-30a7-406a-8ae9-b9a3a0826d1e-kube-api-access-fq4rz\") pod \"ceilometer-0\" (UID: \"780131af-30a7-406a-8ae9-b9a3a0826d1e\") " pod="openstack/ceilometer-0" Jan 27 12:56:04 crc kubenswrapper[4900]: I0127 12:56:04.082749 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 27 12:56:04 crc kubenswrapper[4900]: I0127 12:56:04.511845 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4b3481e-e421-4a42-aa33-8327e969136a" path="/var/lib/kubelet/pods/b4b3481e-e421-4a42-aa33-8327e969136a/volumes" Jan 27 12:56:04 crc kubenswrapper[4900]: I0127 12:56:04.777287 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 27 12:56:04 crc kubenswrapper[4900]: I0127 12:56:04.866829 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="c4594c71-599f-4576-bf95-303da1436ca4" containerName="rabbitmq" containerID="cri-o://71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108" gracePeriod=604794 Jan 27 12:56:05 crc kubenswrapper[4900]: I0127 12:56:05.248205 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"780131af-30a7-406a-8ae9-b9a3a0826d1e","Type":"ContainerStarted","Data":"5c7b824eae1be11db88b7b17cb7a8af979dac52cf6223362755eed84df12c9c8"} Jan 27 12:56:05 crc kubenswrapper[4900]: I0127 12:56:05.623844 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:56:05 crc kubenswrapper[4900]: E0127 12:56:05.624202 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:56:09 crc kubenswrapper[4900]: I0127 12:56:09.317002 4900 generic.go:334] "Generic (PLEG): container finished" podID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerID="d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd" exitCode=0 Jan 27 12:56:09 crc kubenswrapper[4900]: I0127 12:56:09.317535 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4","Type":"ContainerDied","Data":"d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd"} Jan 27 12:56:10 crc kubenswrapper[4900]: I0127 12:56:10.865569 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Jan 27 12:56:10 crc kubenswrapper[4900]: E0127 12:56:10.970364 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]" Jan 27 12:56:11 crc kubenswrapper[4900]: I0127 12:56:11.298517 
4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c4594c71-599f-4576-bf95-303da1436ca4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.132:5671: connect: connection refused" Jan 27 12:56:12 crc kubenswrapper[4900]: I0127 12:56:12.378110 4900 generic.go:334] "Generic (PLEG): container finished" podID="c4594c71-599f-4576-bf95-303da1436ca4" containerID="71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108" exitCode=0 Jan 27 12:56:12 crc kubenswrapper[4900]: I0127 12:56:12.378194 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c4594c71-599f-4576-bf95-303da1436ca4","Type":"ContainerDied","Data":"71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108"} Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.586136 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68df85789f-thkx7"] Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.590223 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.597705 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.621859 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-thkx7"] Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.728732 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktbwl\" (UniqueName: \"kubernetes.io/projected/98b16fd2-6760-4167-9c87-5e77e5490d28-kube-api-access-ktbwl\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.736510 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.736649 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.737088 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-svc\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.737305 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-config\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc 
kubenswrapper[4900]: I0127 12:56:14.737352 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.737575 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.839604 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-config\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.839673 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.839741 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.839794 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktbwl\" (UniqueName: \"kubernetes.io/projected/98b16fd2-6760-4167-9c87-5e77e5490d28-kube-api-access-ktbwl\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.839817 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.839868 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.839958 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-svc\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 
12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.841174 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-svc\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.841392 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.841705 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-config\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.841836 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.842423 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.842555 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.904525 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-thkx7"] Jan 27 12:56:14 crc kubenswrapper[4900]: E0127 12:56:14.905962 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-ktbwl], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-68df85789f-thkx7" podUID="98b16fd2-6760-4167-9c87-5e77e5490d28" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.908570 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktbwl\" (UniqueName: \"kubernetes.io/projected/98b16fd2-6760-4167-9c87-5e77e5490d28-kube-api-access-ktbwl\") pod \"dnsmasq-dns-68df85789f-thkx7\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") " pod="openstack/dnsmasq-dns-68df85789f-thkx7" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.961388 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-z6xsp"] Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.963777 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:14 crc kubenswrapper[4900]: I0127 12:56:14.982658 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-z6xsp"] Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.054879 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.055002 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.055042 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnhb5\" (UniqueName: \"kubernetes.io/projected/b63e2e42-d12b-451e-a055-33abd597ddcd-kube-api-access-gnhb5\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.055094 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-dns-svc\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.055130 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-config\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.055193 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.055269 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.158517 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.158683 4900 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.158731 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnhb5\" (UniqueName: \"kubernetes.io/projected/b63e2e42-d12b-451e-a055-33abd597ddcd-kube-api-access-gnhb5\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.158771 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-dns-svc\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.158813 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-config\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.158886 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.158977 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.159923 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.160024 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.164224 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.164264 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-config\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.164742 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.165013 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63e2e42-d12b-451e-a055-33abd597ddcd-dns-svc\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.208470 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnhb5\" (UniqueName: \"kubernetes.io/projected/b63e2e42-d12b-451e-a055-33abd597ddcd-kube-api-access-gnhb5\") pod \"dnsmasq-dns-bb85b8995-z6xsp\" (UID: \"b63e2e42-d12b-451e-a055-33abd597ddcd\") " pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: E0127 12:56:15.258651 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6b34fa5_f4ff_4e7e_bdd5_9347ed9dda70.slice\": RecentStats: unable to find data in memory cache]"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.301491 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-z6xsp"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.427448 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-thkx7"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.538126 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-thkx7"
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.673954 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktbwl\" (UniqueName: \"kubernetes.io/projected/98b16fd2-6760-4167-9c87-5e77e5490d28-kube-api-access-ktbwl\") pod \"98b16fd2-6760-4167-9c87-5e77e5490d28\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") "
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.674102 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-swift-storage-0\") pod \"98b16fd2-6760-4167-9c87-5e77e5490d28\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") "
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.674203 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-sb\") pod \"98b16fd2-6760-4167-9c87-5e77e5490d28\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") "
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.674287 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-nb\") pod \"98b16fd2-6760-4167-9c87-5e77e5490d28\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") "
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.674330 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-config\") pod \"98b16fd2-6760-4167-9c87-5e77e5490d28\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") "
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.674575 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-openstack-edpm-ipam\") pod \"98b16fd2-6760-4167-9c87-5e77e5490d28\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") "
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.674605 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-svc\") pod \"98b16fd2-6760-4167-9c87-5e77e5490d28\" (UID: \"98b16fd2-6760-4167-9c87-5e77e5490d28\") "
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.675276 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "98b16fd2-6760-4167-9c87-5e77e5490d28" (UID: "98b16fd2-6760-4167-9c87-5e77e5490d28"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.675424 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "98b16fd2-6760-4167-9c87-5e77e5490d28" (UID: "98b16fd2-6760-4167-9c87-5e77e5490d28"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.675449 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "98b16fd2-6760-4167-9c87-5e77e5490d28" (UID: "98b16fd2-6760-4167-9c87-5e77e5490d28"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.675799 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "98b16fd2-6760-4167-9c87-5e77e5490d28" (UID: "98b16fd2-6760-4167-9c87-5e77e5490d28"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.676066 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "98b16fd2-6760-4167-9c87-5e77e5490d28" (UID: "98b16fd2-6760-4167-9c87-5e77e5490d28"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.677202 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-config" (OuterVolumeSpecName: "config") pod "98b16fd2-6760-4167-9c87-5e77e5490d28" (UID: "98b16fd2-6760-4167-9c87-5e77e5490d28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.679194 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98b16fd2-6760-4167-9c87-5e77e5490d28-kube-api-access-ktbwl" (OuterVolumeSpecName: "kube-api-access-ktbwl") pod "98b16fd2-6760-4167-9c87-5e77e5490d28" (UID: "98b16fd2-6760-4167-9c87-5e77e5490d28"). InnerVolumeSpecName "kube-api-access-ktbwl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.777981 4900 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.778020 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.778030 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.778039 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-config\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.778049 4900 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.778071 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98b16fd2-6760-4167-9c87-5e77e5490d28-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:15 crc kubenswrapper[4900]: I0127 12:56:15.778079 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktbwl\" (UniqueName: \"kubernetes.io/projected/98b16fd2-6760-4167-9c87-5e77e5490d28-kube-api-access-ktbwl\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:16 crc kubenswrapper[4900]: I0127 12:56:16.442699 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-thkx7"
Jan 27 12:56:16 crc kubenswrapper[4900]: I0127 12:56:16.529423 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-thkx7"]
Jan 27 12:56:16 crc kubenswrapper[4900]: I0127 12:56:16.545752 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-thkx7"]
Jan 27 12:56:18 crc kubenswrapper[4900]: I0127 12:56:18.482448 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876"
Jan 27 12:56:18 crc kubenswrapper[4900]: E0127 12:56:18.484074 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 12:56:18 crc kubenswrapper[4900]: I0127 12:56:18.507835 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98b16fd2-6760-4167-9c87-5e77e5490d28" path="/var/lib/kubelet/pods/98b16fd2-6760-4167-9c87-5e77e5490d28/volumes"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.108877 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.120250 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.202681 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c4594c71-599f-4576-bf95-303da1436ca4-pod-info\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.202803 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkhpw\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-kube-api-access-qkhpw\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.202839 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-config-data\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.202967 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-plugins-conf\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.203089 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-config-data\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.203148 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-erlang-cookie\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.203183 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgfkj\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-kube-api-access-lgfkj\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.203214 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-pod-info\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204037 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204137 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-erlang-cookie\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204174 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-plugins\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204276 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-plugins-conf\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204355 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-tls\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204464 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-tls\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204510 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-confd\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204583 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-server-conf\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.204665 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-server-conf\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.218273 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c4594c71-599f-4576-bf95-303da1436ca4-pod-info" (OuterVolumeSpecName: "pod-info") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.232508 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.233288 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.233469 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.234264 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.234373 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c4594c71-599f-4576-bf95-303da1436ca4-erlang-cookie-secret\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.234447 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-erlang-cookie-secret\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.234426 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-kube-api-access-qkhpw" (OuterVolumeSpecName: "kube-api-access-qkhpw") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "kube-api-access-qkhpw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.234508 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-plugins\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.234570 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-confd\") pod \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\" (UID: \"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4\") "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.236382 4900 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c4594c71-599f-4576-bf95-303da1436ca4-pod-info\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.236406 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkhpw\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-kube-api-access-qkhpw\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.236419 4900 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.236428 4900 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.236438 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.239525 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.240781 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.247406 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.247537 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.281745 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-pod-info" (OuterVolumeSpecName: "pod-info") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.285306 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.330363 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-kube-api-access-lgfkj" (OuterVolumeSpecName: "kube-api-access-lgfkj") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "kube-api-access-lgfkj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.334846 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.338047 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4594c71-599f-4576-bf95-303da1436ca4-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.345174 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f" (OuterVolumeSpecName: "persistence") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "pvc-993b10ca-8ab6-4585-b71b-169129a75d7f". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: E0127 12:56:19.345530 4900 reconciler_common.go:156] "operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled true) for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") : UnmountVolume.NewUnmounter failed for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") : kubernetes.io/csi: unmounter failed to load volume data file [/var/lib/kubelet/pods/c4594c71-599f-4576-bf95-303da1436ca4/volumes/kubernetes.io~csi/pvc-993b10ca-8ab6-4585-b71b-169129a75d7f/mount]: kubernetes.io/csi: failed to open volume data file [/var/lib/kubelet/pods/c4594c71-599f-4576-bf95-303da1436ca4/volumes/kubernetes.io~csi/pvc-993b10ca-8ab6-4585-b71b-169129a75d7f/vol_data.json]: open /var/lib/kubelet/pods/c4594c71-599f-4576-bf95-303da1436ca4/volumes/kubernetes.io~csi/pvc-993b10ca-8ab6-4585-b71b-169129a75d7f/vol_data.json: no such file or directory" err="UnmountVolume.NewUnmounter failed for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"c4594c71-599f-4576-bf95-303da1436ca4\" (UID: \"c4594c71-599f-4576-bf95-303da1436ca4\") : kubernetes.io/csi: unmounter failed to load volume data file [/var/lib/kubelet/pods/c4594c71-599f-4576-bf95-303da1436ca4/volumes/kubernetes.io~csi/pvc-993b10ca-8ab6-4585-b71b-169129a75d7f/mount]: kubernetes.io/csi: failed to open volume data file [/var/lib/kubelet/pods/c4594c71-599f-4576-bf95-303da1436ca4/volumes/kubernetes.io~csi/pvc-993b10ca-8ab6-4585-b71b-169129a75d7f/vol_data.json]: open /var/lib/kubelet/pods/c4594c71-599f-4576-bf95-303da1436ca4/volumes/kubernetes.io~csi/pvc-993b10ca-8ab6-4585-b71b-169129a75d7f/vol_data.json: no such file or directory"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.346706 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-config-data" (OuterVolumeSpecName: "config-data") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351764 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351812 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgfkj\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-kube-api-access-lgfkj\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351828 4900 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-pod-info\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351846 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351860 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351873 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351932 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") on node \"crc\" "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351953 4900 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c4594c71-599f-4576-bf95-303da1436ca4-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351968 4900 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351981 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.351997 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.358950 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6" (OuterVolumeSpecName: "persistence") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "pvc-32740445-fc24-46e9-95d3-2a83a12efdb6". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.401911 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-config-data" (OuterVolumeSpecName: "config-data") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.448201 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-server-conf" (OuterVolumeSpecName: "server-conf") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.455977 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.456051 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") on node \"crc\" "
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.456239 4900 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c4594c71-599f-4576-bf95-303da1436ca4-server-conf\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.459689 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-server-conf" (OuterVolumeSpecName: "server-conf") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.473232 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.473455 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-993b10ca-8ab6-4585-b71b-169129a75d7f" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f") on node "crc"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.528273 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"9cd313ac-a7d9-4d00-9fd3-6b7950a928e4","Type":"ContainerDied","Data":"dd8ee40b0e543e8c143b05564a96072974a33a652a73228c7ab18a504b6f1ff4"}
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.528337 4900 scope.go:117] "RemoveContainer" containerID="d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.529217 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.538684 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c4594c71-599f-4576-bf95-303da1436ca4","Type":"ContainerDied","Data":"f07de842d7c649658cb9dc9947031c513ab246925a2209ecd3c9ef11ecc4cfab"}
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.538920 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.543175 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c4594c71-599f-4576-bf95-303da1436ca4" (UID: "c4594c71-599f-4576-bf95-303da1436ca4"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.559480 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c4594c71-599f-4576-bf95-303da1436ca4-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.559952 4900 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-server-conf\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.560075 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.568120 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.568771 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-32740445-fc24-46e9-95d3-2a83a12efdb6" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6") on node "crc"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.588342 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" (UID: "9cd313ac-a7d9-4d00-9fd3-6b7950a928e4"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.667769 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.667809 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") on node \"crc\" DevicePath \"\""
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.898537 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.918757 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.935704 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.958581 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.978887 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 12:56:19 crc kubenswrapper[4900]: E0127 12:56:19.979971 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="rabbitmq"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.980005 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="rabbitmq"
Jan 27 12:56:19 crc kubenswrapper[4900]: E0127 12:56:19.980107 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4594c71-599f-4576-bf95-303da1436ca4" containerName="setup-container"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.980120 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4594c71-599f-4576-bf95-303da1436ca4" containerName="setup-container"
Jan 27 12:56:19 crc kubenswrapper[4900]: E0127 12:56:19.980147 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="setup-container"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.980159 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="setup-container"
Jan 27 12:56:19 crc kubenswrapper[4900]: E0127 12:56:19.980197 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4594c71-599f-4576-bf95-303da1436ca4" containerName="rabbitmq"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.980206 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4594c71-599f-4576-bf95-303da1436ca4" containerName="rabbitmq"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.980634 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4594c71-599f-4576-bf95-303da1436ca4" containerName="rabbitmq"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.980681 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" containerName="rabbitmq"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.985544 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.998977 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.999420 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.999742 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 27 12:56:19 crc kubenswrapper[4900]: I0127 12:56:19.999919 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.000253 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-fszws"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.000470 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.000661 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.059343 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.073274 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.091766 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.095552 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.095793 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.095899 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpl9b\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-kube-api-access-gpl9b\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.097770 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.125248 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.132439 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f460e552-e35d-44f9-9041-82280a4a840e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.132831 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.132932 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.133028 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f460e552-e35d-44f9-9041-82280a4a840e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.133106 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.133177 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.175997 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.236951 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.237142 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.237265 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.237337 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f460e552-e35d-44f9-9041-82280a4a840e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.237380 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-server-conf\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.238983 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.239050 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.239166 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.239281 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240352 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240423 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-config-data\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240456 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240572 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240630 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpl9b\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-kube-api-access-gpl9b\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240717 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240762 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b7437170-ba39-40c1-a876-4860f350d1e6-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240900 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qwrj\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-kube-api-access-5qwrj\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.240994 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.241040 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.241102 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.241234 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f460e552-e35d-44f9-9041-82280a4a840e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.241294 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b7437170-ba39-40c1-a876-4860f350d1e6-pod-info\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.242593 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.242642 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.243612 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.251022 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.258195 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f460e552-e35d-44f9-9041-82280a4a840e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.261346 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.261420 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.264992 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f460e552-e35d-44f9-9041-82280a4a840e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.265505 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpl9b\" (UniqueName: \"kubernetes.io/projected/f460e552-e35d-44f9-9041-82280a4a840e-kube-api-access-gpl9b\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.265864 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.265899 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5a6b33fd526ee85a4a24fe8833d231aad71709cc2cd25719d061ac882771bde1/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.269479 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f460e552-e35d-44f9-9041-82280a4a840e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.343886 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.343946 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b7437170-ba39-40c1-a876-4860f350d1e6-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344006 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qwrj\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-kube-api-access-5qwrj\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344073 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344106 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344160 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b7437170-ba39-40c1-a876-4860f350d1e6-pod-info\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344235 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344321 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-server-conf\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344358 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344440 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-config-data\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.344463 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.345079 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.345396 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2"
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.346984 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.347016 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a28b60198351fffe37bd5f288973b6a03f7909d229e7151274d0b3aa1c852f26/globalmount\"" pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.347144 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-config-data\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.348590 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.350139 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b7437170-ba39-40c1-a876-4860f350d1e6-server-conf\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.350195 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.351597 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b7437170-ba39-40c1-a876-4860f350d1e6-pod-info\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.360944 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b7437170-ba39-40c1-a876-4860f350d1e6-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.363677 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.370751 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qwrj\" (UniqueName: \"kubernetes.io/projected/b7437170-ba39-40c1-a876-4860f350d1e6-kube-api-access-5qwrj\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.398430 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-993b10ca-8ab6-4585-b71b-169129a75d7f\") pod \"rabbitmq-cell1-server-0\" (UID: \"f460e552-e35d-44f9-9041-82280a4a840e\") " pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.603870 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32740445-fc24-46e9-95d3-2a83a12efdb6\") pod \"rabbitmq-server-2\" (UID: \"b7437170-ba39-40c1-a876-4860f350d1e6\") " pod="openstack/rabbitmq-server-2" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.614571 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cd313ac-a7d9-4d00-9fd3-6b7950a928e4" path="/var/lib/kubelet/pods/9cd313ac-a7d9-4d00-9fd3-6b7950a928e4/volumes" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.616219 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4594c71-599f-4576-bf95-303da1436ca4" path="/var/lib/kubelet/pods/c4594c71-599f-4576-bf95-303da1436ca4/volumes" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.675000 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:56:20 crc kubenswrapper[4900]: I0127 12:56:20.716168 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 27 12:56:26 crc kubenswrapper[4900]: I0127 12:56:26.869483 4900 scope.go:117] "RemoveContainer" containerID="67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a" Jan 27 12:56:30 crc kubenswrapper[4900]: I0127 12:56:30.500211 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:56:30 crc kubenswrapper[4900]: E0127 12:56:30.500857 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:56:32 crc kubenswrapper[4900]: E0127 12:56:32.011866 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Jan 27 12:56:32 crc kubenswrapper[4900]: E0127 12:56:32.012284 4900 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Jan 27 12:56:32 crc kubenswrapper[4900]: E0127 12:56:32.012482 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kzsls,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-z6j69_openstack(01b46bc7-748f-4872-abb3-a7faef291c0b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:56:32 crc kubenswrapper[4900]: E0127 12:56:32.015922 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-z6j69" podUID="01b46bc7-748f-4872-abb3-a7faef291c0b" Jan 27 12:56:32 crc kubenswrapper[4900]: E0127 12:56:32.829145 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-z6j69" podUID="01b46bc7-748f-4872-abb3-a7faef291c0b" Jan 27 12:56:33 crc kubenswrapper[4900]: I0127 12:56:33.585537 4900 scope.go:117] "RemoveContainer" containerID="6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b" Jan 27 12:56:33 crc kubenswrapper[4900]: I0127 12:56:33.945112 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-z6xsp"] Jan 27 12:56:35 crc kubenswrapper[4900]: I0127 12:56:35.692224 4900 scope.go:117] "RemoveContainer" containerID="fa61a06eeafaa4aebbc5a3252fd558c86a4604d5db84faa144eba4f9eb23e47b" Jan 27 12:56:35 crc kubenswrapper[4900]: W0127 12:56:35.705722 4900 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb63e2e42_d12b_451e_a055_33abd597ddcd.slice/crio-efc17ca359fbe43c560b789a6e8003aeca9ee25a473db06789ba1fe8117412c9 WatchSource:0}: Error finding container efc17ca359fbe43c560b789a6e8003aeca9ee25a473db06789ba1fe8117412c9: Status 404 returned error can't find the container with id efc17ca359fbe43c560b789a6e8003aeca9ee25a473db06789ba1fe8117412c9 Jan 27 12:56:35 crc kubenswrapper[4900]: E0127 12:56:35.714586 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Jan 27 12:56:35 crc kubenswrapper[4900]: E0127 12:56:35.714703 4900 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Jan 27 12:56:35 crc kubenswrapper[4900]: E0127 12:56:35.714864 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nf7h67bh669h698h598h679h8fh56fh5h59ch8dh68bh67dh5f4h5ddh544h5ffh5bdh55fh8h5bch646hch5b9h56bh658h5b7hf4h8h55bh58h64dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fq4rz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(780131af-30a7-406a-8ae9-b9a3a0826d1e): 
ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 12:56:35 crc kubenswrapper[4900]: I0127 12:56:35.752257 4900 scope.go:117] "RemoveContainer" containerID="71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108" Jan 27 12:56:35 crc kubenswrapper[4900]: I0127 12:56:35.897685 4900 scope.go:117] "RemoveContainer" containerID="71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108" Jan 27 12:56:35 crc kubenswrapper[4900]: I0127 12:56:35.923883 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" event={"ID":"b63e2e42-d12b-451e-a055-33abd597ddcd","Type":"ContainerStarted","Data":"efc17ca359fbe43c560b789a6e8003aeca9ee25a473db06789ba1fe8117412c9"} Jan 27 12:56:35 crc kubenswrapper[4900]: E0127 12:56:35.958633 4900 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_rabbitmq_rabbitmq-cell1-server-0_openstack_c4594c71-599f-4576-bf95-303da1436ca4_0 in pod sandbox f07de842d7c649658cb9dc9947031c513ab246925a2209ecd3c9ef11ecc4cfab from index: no such id: '71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108'" containerID="71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108" Jan 27 12:56:35 crc kubenswrapper[4900]: E0127 12:56:35.959050 4900 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_rabbitmq_rabbitmq-cell1-server-0_openstack_c4594c71-599f-4576-bf95-303da1436ca4_0 in pod sandbox f07de842d7c649658cb9dc9947031c513ab246925a2209ecd3c9ef11ecc4cfab from index: no such id: '71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108'" containerID="71cf4529469023f91cd081cde6cf22cc1dfd82d4ddc1ef4ac3a542a9e983a108" Jan 27 12:56:35 crc kubenswrapper[4900]: I0127 12:56:35.959095 4900 scope.go:117] "RemoveContainer" containerID="6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b" Jan 27 12:56:35 crc kubenswrapper[4900]: I0127 12:56:35.959202 4900 scope.go:117] "RemoveContainer" containerID="67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a" Jan 27 12:56:35 crc kubenswrapper[4900]: E0127 12:56:35.965855 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b\": container with ID starting with 6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b not found: ID does not exist" containerID="6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b" Jan 27 12:56:35 crc kubenswrapper[4900]: E0127 12:56:35.965911 4900 kuberuntime_gc.go:150] "Failed to remove container" err="failed to get container status \"6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b\": rpc error: code = NotFound desc = could not find container \"6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b\": container with ID starting with 6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b not found: ID does not exist" containerID="6c04151f3bb23a39db7a3351f189d2470c0bb62e4146259b779155de735bdd7b" Jan 27 12:56:35 crc kubenswrapper[4900]: I0127 12:56:35.966350 4900 scope.go:117] "RemoveContainer" containerID="9eee290fb95365149a8fe071c30f618e894e87ff3fbd0514771216a845c4333a" Jan 27 12:56:35 crc kubenswrapper[4900]: E0127 12:56:35.969800 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a\": container with ID starting with 67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a not found: ID does not exist" containerID="67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a" Jan 27 12:56:35 crc kubenswrapper[4900]: I0127 12:56:35.969846 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a"} err="failed to get container status \"67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a\": rpc error: code = NotFound desc = could not find container \"67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a\": container with ID starting with 67576cb1aa6e3a772fd86cf622769d1edc10f4bad80bb0919389db6d1691721a not found: ID does not exist" Jan 27 12:56:36 crc kubenswrapper[4900]: I0127 12:56:36.012998 4900 scope.go:117] "RemoveContainer" containerID="d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd" Jan 27 12:56:36 crc kubenswrapper[4900]: E0127 12:56:36.013809 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd\": container with ID starting with d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd not found: ID does not exist" containerID="d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd" Jan 27 12:56:36 crc kubenswrapper[4900]: E0127 12:56:36.013879 4900 kuberuntime_gc.go:150] "Failed to remove container" err="failed to get container status \"d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd\": rpc error: code = NotFound desc = could not find container \"d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd\": container with ID starting with d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd not found: ID does not exist" containerID="d61edc48b89987cfbdf4d9a6002014c3ed5eed4ee311dd316a4924ab0d5efdbd" Jan 27 12:56:36 crc kubenswrapper[4900]: I0127 12:56:36.257424 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 27 12:56:36 crc kubenswrapper[4900]: I0127 12:56:36.271009 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 27 12:56:36 crc kubenswrapper[4900]: I0127 12:56:36.962814 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"780131af-30a7-406a-8ae9-b9a3a0826d1e","Type":"ContainerStarted","Data":"cb06dd062444f955782181415dfdb26e08a859af5354d7e4bd1aad066b2fba13"} Jan 27 12:56:36 crc kubenswrapper[4900]: I0127 12:56:36.965172 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f460e552-e35d-44f9-9041-82280a4a840e","Type":"ContainerStarted","Data":"d9f21db4669a2dab960ef9ea130751bea3275a40c3d1718e52df42abea21ab1c"} Jan 27 12:56:36 crc kubenswrapper[4900]: I0127 12:56:36.968361 4900 generic.go:334] "Generic (PLEG): container finished" podID="b63e2e42-d12b-451e-a055-33abd597ddcd" containerID="4b4c632f3a1d298214a9ac0bd940e628ec35268607ba5277271b116e5ee31cec" exitCode=0 Jan 27 12:56:36 crc kubenswrapper[4900]: I0127 12:56:36.968478 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" 
event={"ID":"b63e2e42-d12b-451e-a055-33abd597ddcd","Type":"ContainerDied","Data":"4b4c632f3a1d298214a9ac0bd940e628ec35268607ba5277271b116e5ee31cec"} Jan 27 12:56:36 crc kubenswrapper[4900]: I0127 12:56:36.975081 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"b7437170-ba39-40c1-a876-4860f350d1e6","Type":"ContainerStarted","Data":"45d8b5cb46073534f61a379b3db5dd44e71a9d6bf2b3b9db972d597e19020a7a"} Jan 27 12:56:37 crc kubenswrapper[4900]: I0127 12:56:37.991463 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" event={"ID":"b63e2e42-d12b-451e-a055-33abd597ddcd","Type":"ContainerStarted","Data":"1bfcf07b19877b09139f7f72a5d3c7efc395a213c90152b8a4086e05e1a249f8"} Jan 27 12:56:37 crc kubenswrapper[4900]: I0127 12:56:37.993399 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:38 crc kubenswrapper[4900]: I0127 12:56:37.999951 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"780131af-30a7-406a-8ae9-b9a3a0826d1e","Type":"ContainerStarted","Data":"d510b2f2cdbd53e51db5814ed188c9f0ea2212e98601483556a23d651df4086c"} Jan 27 12:56:38 crc kubenswrapper[4900]: I0127 12:56:38.024890 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" podStartSLOduration=24.024861644 podStartE2EDuration="24.024861644s" podCreationTimestamp="2026-01-27 12:56:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:56:38.021046743 +0000 UTC m=+1825.258074953" watchObservedRunningTime="2026-01-27 12:56:38.024861644 +0000 UTC m=+1825.261889854" Jan 27 12:56:38 crc kubenswrapper[4900]: E0127 12:56:38.882255 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" Jan 27 12:56:39 crc kubenswrapper[4900]: I0127 12:56:39.024867 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"780131af-30a7-406a-8ae9-b9a3a0826d1e","Type":"ContainerStarted","Data":"f3792c062121b88220f4de70cf828479e00503e0767235c0d5b679740da937fa"} Jan 27 12:56:39 crc kubenswrapper[4900]: I0127 12:56:39.026822 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 27 12:56:39 crc kubenswrapper[4900]: E0127 12:56:39.027592 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" Jan 27 12:56:39 crc kubenswrapper[4900]: I0127 12:56:39.046867 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f460e552-e35d-44f9-9041-82280a4a840e","Type":"ContainerStarted","Data":"ca567738d061f502e6f62981d61ab17b976f4e10016b543d7fb3d04960c14f10"} Jan 27 12:56:39 crc kubenswrapper[4900]: I0127 12:56:39.050496 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" 
event={"ID":"b7437170-ba39-40c1-a876-4860f350d1e6","Type":"ContainerStarted","Data":"106f6f040c276a2381460626096442d62ba3252275944cbcaba93b583e5e68e5"} Jan 27 12:56:40 crc kubenswrapper[4900]: E0127 12:56:40.071717 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" Jan 27 12:56:44 crc kubenswrapper[4900]: I0127 12:56:44.483966 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:56:44 crc kubenswrapper[4900]: E0127 12:56:44.484989 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:56:45 crc kubenswrapper[4900]: I0127 12:56:45.303509 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bb85b8995-z6xsp" Jan 27 12:56:45 crc kubenswrapper[4900]: I0127 12:56:45.425544 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-xkbsb"] Jan 27 12:56:45 crc kubenswrapper[4900]: I0127 12:56:45.425951 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" podUID="8b0296e5-d686-4e51-9753-b4a09e72183e" containerName="dnsmasq-dns" containerID="cri-o://49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc" gracePeriod=10 Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.039123 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.155494 4900 generic.go:334] "Generic (PLEG): container finished" podID="8b0296e5-d686-4e51-9753-b4a09e72183e" containerID="49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc" exitCode=0 Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.155567 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" event={"ID":"8b0296e5-d686-4e51-9753-b4a09e72183e","Type":"ContainerDied","Data":"49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc"} Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.155691 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" event={"ID":"8b0296e5-d686-4e51-9753-b4a09e72183e","Type":"ContainerDied","Data":"0870b957adcf7b11010e514bfdfde1e069f2dee55a4d79108d731a61bd4885ef"} Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.155730 4900 scope.go:117] "RemoveContainer" containerID="49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.155758 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-xkbsb" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.182873 4900 scope.go:117] "RemoveContainer" containerID="bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.208990 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-swift-storage-0\") pod \"8b0296e5-d686-4e51-9753-b4a09e72183e\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.209174 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-nb\") pod \"8b0296e5-d686-4e51-9753-b4a09e72183e\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.209654 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-config\") pod \"8b0296e5-d686-4e51-9753-b4a09e72183e\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.209712 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-svc\") pod \"8b0296e5-d686-4e51-9753-b4a09e72183e\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.209793 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-sb\") pod \"8b0296e5-d686-4e51-9753-b4a09e72183e\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.209861 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5ds9\" (UniqueName: \"kubernetes.io/projected/8b0296e5-d686-4e51-9753-b4a09e72183e-kube-api-access-x5ds9\") pod \"8b0296e5-d686-4e51-9753-b4a09e72183e\" (UID: \"8b0296e5-d686-4e51-9753-b4a09e72183e\") " Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.217479 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b0296e5-d686-4e51-9753-b4a09e72183e-kube-api-access-x5ds9" (OuterVolumeSpecName: "kube-api-access-x5ds9") pod "8b0296e5-d686-4e51-9753-b4a09e72183e" (UID: "8b0296e5-d686-4e51-9753-b4a09e72183e"). InnerVolumeSpecName "kube-api-access-x5ds9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.217815 4900 scope.go:117] "RemoveContainer" containerID="49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc" Jan 27 12:56:46 crc kubenswrapper[4900]: E0127 12:56:46.218460 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc\": container with ID starting with 49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc not found: ID does not exist" containerID="49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.218531 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc"} err="failed to get container status \"49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc\": rpc error: code = NotFound desc = could not find container \"49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc\": container with ID starting with 49b0b9bb3449a59723ae32c5e7f5c4052f0b2fb6b4b871d90676ba999b6ad5fc not found: ID does not exist" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.218574 4900 scope.go:117] "RemoveContainer" containerID="bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0" Jan 27 12:56:46 crc kubenswrapper[4900]: E0127 12:56:46.219175 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0\": container with ID starting with bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0 not found: ID does not exist" containerID="bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.219233 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0"} err="failed to get container status \"bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0\": rpc error: code = NotFound desc = could not find container \"bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0\": container with ID starting with bad03f9ef037dbc8322f3157fc6ab2e54d3134b9ba50281b2452bdab3dbd42e0 not found: ID does not exist" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.280158 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8b0296e5-d686-4e51-9753-b4a09e72183e" (UID: "8b0296e5-d686-4e51-9753-b4a09e72183e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.283842 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8b0296e5-d686-4e51-9753-b4a09e72183e" (UID: "8b0296e5-d686-4e51-9753-b4a09e72183e"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.285989 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8b0296e5-d686-4e51-9753-b4a09e72183e" (UID: "8b0296e5-d686-4e51-9753-b4a09e72183e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.290737 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-config" (OuterVolumeSpecName: "config") pod "8b0296e5-d686-4e51-9753-b4a09e72183e" (UID: "8b0296e5-d686-4e51-9753-b4a09e72183e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.297314 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8b0296e5-d686-4e51-9753-b4a09e72183e" (UID: "8b0296e5-d686-4e51-9753-b4a09e72183e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.313755 4900 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-config\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.314020 4900 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.314113 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.314259 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5ds9\" (UniqueName: \"kubernetes.io/projected/8b0296e5-d686-4e51-9753-b4a09e72183e-kube-api-access-x5ds9\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.314323 4900 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.314382 4900 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8b0296e5-d686-4e51-9753-b4a09e72183e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.507498 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-xkbsb"] Jan 27 12:56:46 crc kubenswrapper[4900]: I0127 12:56:46.523409 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-xkbsb"] Jan 27 12:56:48 crc kubenswrapper[4900]: I0127 12:56:48.185710 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-z6j69" 
event={"ID":"01b46bc7-748f-4872-abb3-a7faef291c0b","Type":"ContainerStarted","Data":"54dc1e99596bff522866900a49ba07dbffa3d9d02aa9300147907b1c31d64bec"} Jan 27 12:56:48 crc kubenswrapper[4900]: I0127 12:56:48.233208 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-z6j69" podStartSLOduration=2.096722734 podStartE2EDuration="55.233179332s" podCreationTimestamp="2026-01-27 12:55:53 +0000 UTC" firstStartedPulling="2026-01-27 12:55:54.608547911 +0000 UTC m=+1781.845576111" lastFinishedPulling="2026-01-27 12:56:47.745004489 +0000 UTC m=+1834.982032709" observedRunningTime="2026-01-27 12:56:48.213996536 +0000 UTC m=+1835.451024746" watchObservedRunningTime="2026-01-27 12:56:48.233179332 +0000 UTC m=+1835.470207542" Jan 27 12:56:48 crc kubenswrapper[4900]: I0127 12:56:48.497928 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b0296e5-d686-4e51-9753-b4a09e72183e" path="/var/lib/kubelet/pods/8b0296e5-d686-4e51-9753-b4a09e72183e/volumes" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.398881 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7"] Jan 27 12:56:50 crc kubenswrapper[4900]: E0127 12:56:50.400655 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b0296e5-d686-4e51-9753-b4a09e72183e" containerName="dnsmasq-dns" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.400679 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b0296e5-d686-4e51-9753-b4a09e72183e" containerName="dnsmasq-dns" Jan 27 12:56:50 crc kubenswrapper[4900]: E0127 12:56:50.400716 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b0296e5-d686-4e51-9753-b4a09e72183e" containerName="init" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.400726 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b0296e5-d686-4e51-9753-b4a09e72183e" containerName="init" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.401203 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b0296e5-d686-4e51-9753-b4a09e72183e" containerName="dnsmasq-dns" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.402787 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.406437 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.406509 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.406548 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.406654 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.413550 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.413701 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.413855 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.413976 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhc2m\" (UniqueName: \"kubernetes.io/projected/cb831e66-cd2f-4a9f-8a72-385490b13aa3-kube-api-access-bhc2m\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.417554 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7"] Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.517594 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.517734 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.517807 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhc2m\" (UniqueName: \"kubernetes.io/projected/cb831e66-cd2f-4a9f-8a72-385490b13aa3-kube-api-access-bhc2m\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.517985 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.529245 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.529316 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.532561 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.536428 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhc2m\" (UniqueName: \"kubernetes.io/projected/cb831e66-cd2f-4a9f-8a72-385490b13aa3-kube-api-access-bhc2m\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:50 crc kubenswrapper[4900]: I0127 12:56:50.734776 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:56:51 crc kubenswrapper[4900]: I0127 12:56:51.226772 4900 generic.go:334] "Generic (PLEG): container finished" podID="01b46bc7-748f-4872-abb3-a7faef291c0b" containerID="54dc1e99596bff522866900a49ba07dbffa3d9d02aa9300147907b1c31d64bec" exitCode=0 Jan 27 12:56:51 crc kubenswrapper[4900]: I0127 12:56:51.226879 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-z6j69" event={"ID":"01b46bc7-748f-4872-abb3-a7faef291c0b","Type":"ContainerDied","Data":"54dc1e99596bff522866900a49ba07dbffa3d9d02aa9300147907b1c31d64bec"} Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.052625 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7"] Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.243002 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" event={"ID":"cb831e66-cd2f-4a9f-8a72-385490b13aa3","Type":"ContainerStarted","Data":"f299ad2574cabd51918f0cb645759a2ffb2b62ccfcdca535abc9a919415f4913"} Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.507817 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.758879 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-z6j69" Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.893353 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-combined-ca-bundle\") pod \"01b46bc7-748f-4872-abb3-a7faef291c0b\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.893958 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-config-data\") pod \"01b46bc7-748f-4872-abb3-a7faef291c0b\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.894130 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzsls\" (UniqueName: \"kubernetes.io/projected/01b46bc7-748f-4872-abb3-a7faef291c0b-kube-api-access-kzsls\") pod \"01b46bc7-748f-4872-abb3-a7faef291c0b\" (UID: \"01b46bc7-748f-4872-abb3-a7faef291c0b\") " Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.901756 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01b46bc7-748f-4872-abb3-a7faef291c0b-kube-api-access-kzsls" (OuterVolumeSpecName: "kube-api-access-kzsls") pod "01b46bc7-748f-4872-abb3-a7faef291c0b" (UID: "01b46bc7-748f-4872-abb3-a7faef291c0b"). InnerVolumeSpecName "kube-api-access-kzsls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:56:52 crc kubenswrapper[4900]: I0127 12:56:52.944680 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "01b46bc7-748f-4872-abb3-a7faef291c0b" (UID: "01b46bc7-748f-4872-abb3-a7faef291c0b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.000423 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzsls\" (UniqueName: \"kubernetes.io/projected/01b46bc7-748f-4872-abb3-a7faef291c0b-kube-api-access-kzsls\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.000906 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.024263 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-config-data" (OuterVolumeSpecName: "config-data") pod "01b46bc7-748f-4872-abb3-a7faef291c0b" (UID: "01b46bc7-748f-4872-abb3-a7faef291c0b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.103344 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01b46bc7-748f-4872-abb3-a7faef291c0b-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.265177 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"780131af-30a7-406a-8ae9-b9a3a0826d1e","Type":"ContainerStarted","Data":"7b6ea029f6d464c8d975de774809f4a8ad2d400f9a65de460b9211083ded43f3"} Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.269575 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-z6j69" event={"ID":"01b46bc7-748f-4872-abb3-a7faef291c0b","Type":"ContainerDied","Data":"37560e99d5aa0b9fa20b8cc289e0a552c318fad7a762f910ed0640deb29ea643"} Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.269629 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37560e99d5aa0b9fa20b8cc289e0a552c318fad7a762f910ed0640deb29ea643" Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.269729 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-z6j69" Jan 27 12:56:53 crc kubenswrapper[4900]: I0127 12:56:53.304038 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.161861739 podStartE2EDuration="50.304010719s" podCreationTimestamp="2026-01-27 12:56:03 +0000 UTC" firstStartedPulling="2026-01-27 12:56:04.794837081 +0000 UTC m=+1792.031865291" lastFinishedPulling="2026-01-27 12:56:52.936986051 +0000 UTC m=+1840.174014271" observedRunningTime="2026-01-27 12:56:53.298911251 +0000 UTC m=+1840.535939471" watchObservedRunningTime="2026-01-27 12:56:53.304010719 +0000 UTC m=+1840.541038929" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.398555 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-555ff797c5-bldkh"] Jan 27 12:56:54 crc kubenswrapper[4900]: E0127 12:56:54.399668 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b46bc7-748f-4872-abb3-a7faef291c0b" containerName="heat-db-sync" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.399688 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b46bc7-748f-4872-abb3-a7faef291c0b" containerName="heat-db-sync" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.399982 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="01b46bc7-748f-4872-abb3-a7faef291c0b" containerName="heat-db-sync" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.401209 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.437630 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-555ff797c5-bldkh"] Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.449115 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-config-data-custom\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.449207 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6898m\" (UniqueName: \"kubernetes.io/projected/f43811ef-7c32-49e2-b59f-9b845dd80a4f-kube-api-access-6898m\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.449242 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-combined-ca-bundle\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.449540 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-config-data\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.514585 4900 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/heat-api-857c4f4785-hvkmw"] Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.521159 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5ff866d6bc-dnsvf"] Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.521441 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.525125 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.537971 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-857c4f4785-hvkmw"] Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.557342 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4q6x\" (UniqueName: \"kubernetes.io/projected/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-kube-api-access-c4q6x\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.557427 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-config-data-custom\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.557532 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-public-tls-certs\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.558840 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-config-data\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.565485 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-internal-tls-certs\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.565976 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-config-data\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566027 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-config-data\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 
27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566045 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-config-data-custom\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566272 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-combined-ca-bundle\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566349 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-combined-ca-bundle\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566424 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-config-data-custom\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566453 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6898m\" (UniqueName: \"kubernetes.io/projected/f43811ef-7c32-49e2-b59f-9b845dd80a4f-kube-api-access-6898m\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566493 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-combined-ca-bundle\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566563 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2nlm\" (UniqueName: \"kubernetes.io/projected/023f0a68-6e12-4839-a916-6a08d907a415-kube-api-access-l2nlm\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566597 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-internal-tls-certs\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.566622 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-public-tls-certs\") pod 
\"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.572238 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5ff866d6bc-dnsvf"] Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.598729 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-config-data-custom\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.599437 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-combined-ca-bundle\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.599823 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f43811ef-7c32-49e2-b59f-9b845dd80a4f-config-data\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.623020 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6898m\" (UniqueName: \"kubernetes.io/projected/f43811ef-7c32-49e2-b59f-9b845dd80a4f-kube-api-access-6898m\") pod \"heat-engine-555ff797c5-bldkh\" (UID: \"f43811ef-7c32-49e2-b59f-9b845dd80a4f\") " pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701078 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2nlm\" (UniqueName: \"kubernetes.io/projected/023f0a68-6e12-4839-a916-6a08d907a415-kube-api-access-l2nlm\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701146 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-internal-tls-certs\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701171 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-public-tls-certs\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701337 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4q6x\" (UniqueName: \"kubernetes.io/projected/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-kube-api-access-c4q6x\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701383 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-config-data-custom\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701432 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-public-tls-certs\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701502 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-config-data\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701630 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-internal-tls-certs\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701715 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-config-data\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701739 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-config-data-custom\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701860 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-combined-ca-bundle\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.701928 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-combined-ca-bundle\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.712785 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-public-tls-certs\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.713187 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-internal-tls-certs\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.715092 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-config-data-custom\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.717311 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-config-data\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.717433 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-config-data\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.718939 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-combined-ca-bundle\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.720237 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-combined-ca-bundle\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.725710 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-internal-tls-certs\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.726432 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/023f0a68-6e12-4839-a916-6a08d907a415-public-tls-certs\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.728032 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2nlm\" (UniqueName: \"kubernetes.io/projected/023f0a68-6e12-4839-a916-6a08d907a415-kube-api-access-l2nlm\") pod \"heat-cfnapi-5ff866d6bc-dnsvf\" (UID: \"023f0a68-6e12-4839-a916-6a08d907a415\") " pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.734347 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4q6x\" (UniqueName: \"kubernetes.io/projected/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-kube-api-access-c4q6x\") pod 
\"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.734806 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.737371 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9bfbc62-f1f1-409e-856c-63d4ee15ea7a-config-data-custom\") pod \"heat-api-857c4f4785-hvkmw\" (UID: \"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a\") " pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.876384 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:54 crc kubenswrapper[4900]: I0127 12:56:54.930870 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:55 crc kubenswrapper[4900]: I0127 12:56:55.479584 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-555ff797c5-bldkh"] Jan 27 12:56:55 crc kubenswrapper[4900]: I0127 12:56:55.485626 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:56:55 crc kubenswrapper[4900]: E0127 12:56:55.486001 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:56:55 crc kubenswrapper[4900]: I0127 12:56:55.644942 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-857c4f4785-hvkmw"] Jan 27 12:56:55 crc kubenswrapper[4900]: W0127 12:56:55.657476 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9bfbc62_f1f1_409e_856c_63d4ee15ea7a.slice/crio-cf15f0a8a3bfbccfac1e697f899bc82c0f18ea20e12910ef3039921313ab2326 WatchSource:0}: Error finding container cf15f0a8a3bfbccfac1e697f899bc82c0f18ea20e12910ef3039921313ab2326: Status 404 returned error can't find the container with id cf15f0a8a3bfbccfac1e697f899bc82c0f18ea20e12910ef3039921313ab2326 Jan 27 12:56:55 crc kubenswrapper[4900]: W0127 12:56:55.662238 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod023f0a68_6e12_4839_a916_6a08d907a415.slice/crio-80bdf25844b2d2d0c9142b166ea25f93852b3dcbe006d734ac2b4e773946415a WatchSource:0}: Error finding container 80bdf25844b2d2d0c9142b166ea25f93852b3dcbe006d734ac2b4e773946415a: Status 404 returned error can't find the container with id 80bdf25844b2d2d0c9142b166ea25f93852b3dcbe006d734ac2b4e773946415a Jan 27 12:56:55 crc kubenswrapper[4900]: I0127 12:56:55.669671 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5ff866d6bc-dnsvf"] Jan 27 12:56:56 crc kubenswrapper[4900]: I0127 12:56:56.369405 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-555ff797c5-bldkh" 
event={"ID":"f43811ef-7c32-49e2-b59f-9b845dd80a4f","Type":"ContainerStarted","Data":"bb95ea66d857485f466fe39b535a776a201d634407543b0bf6c2e64975aa15c2"} Jan 27 12:56:56 crc kubenswrapper[4900]: I0127 12:56:56.369787 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-555ff797c5-bldkh" event={"ID":"f43811ef-7c32-49e2-b59f-9b845dd80a4f","Type":"ContainerStarted","Data":"49345c1c6017ffd0792f50c8e6c208de93d9b38a3f13778575d3ba5287fb0127"} Jan 27 12:56:56 crc kubenswrapper[4900]: I0127 12:56:56.372300 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:56:56 crc kubenswrapper[4900]: I0127 12:56:56.374506 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" event={"ID":"023f0a68-6e12-4839-a916-6a08d907a415","Type":"ContainerStarted","Data":"80bdf25844b2d2d0c9142b166ea25f93852b3dcbe006d734ac2b4e773946415a"} Jan 27 12:56:56 crc kubenswrapper[4900]: I0127 12:56:56.378079 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-857c4f4785-hvkmw" event={"ID":"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a","Type":"ContainerStarted","Data":"cf15f0a8a3bfbccfac1e697f899bc82c0f18ea20e12910ef3039921313ab2326"} Jan 27 12:56:56 crc kubenswrapper[4900]: I0127 12:56:56.416722 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-555ff797c5-bldkh" podStartSLOduration=2.416699166 podStartE2EDuration="2.416699166s" podCreationTimestamp="2026-01-27 12:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:56:56.39065897 +0000 UTC m=+1843.627687180" watchObservedRunningTime="2026-01-27 12:56:56.416699166 +0000 UTC m=+1843.653727376" Jan 27 12:56:59 crc kubenswrapper[4900]: I0127 12:56:59.434375 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-857c4f4785-hvkmw" event={"ID":"c9bfbc62-f1f1-409e-856c-63d4ee15ea7a","Type":"ContainerStarted","Data":"86aacb4b7190d20c688219f66a63c2b2f60accdc196b72e75db06087aef894a2"} Jan 27 12:56:59 crc kubenswrapper[4900]: I0127 12:56:59.435209 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:56:59 crc kubenswrapper[4900]: I0127 12:56:59.439884 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" event={"ID":"023f0a68-6e12-4839-a916-6a08d907a415","Type":"ContainerStarted","Data":"9b9ebd042555ccfa4e2e6ed44a18be3bd7cb2d10ba75a0cd2ee455ef33b72541"} Jan 27 12:56:59 crc kubenswrapper[4900]: I0127 12:56:59.440348 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:56:59 crc kubenswrapper[4900]: I0127 12:56:59.460142 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-857c4f4785-hvkmw" podStartSLOduration=2.987422593 podStartE2EDuration="5.460120662s" podCreationTimestamp="2026-01-27 12:56:54 +0000 UTC" firstStartedPulling="2026-01-27 12:56:55.663562275 +0000 UTC m=+1842.900590485" lastFinishedPulling="2026-01-27 12:56:58.136260344 +0000 UTC m=+1845.373288554" observedRunningTime="2026-01-27 12:56:59.456853837 +0000 UTC m=+1846.693882047" watchObservedRunningTime="2026-01-27 12:56:59.460120662 +0000 UTC m=+1846.697148882" Jan 27 12:56:59 crc kubenswrapper[4900]: I0127 12:56:59.481248 4900 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" podStartSLOduration=3.02141256 podStartE2EDuration="5.481232165s" podCreationTimestamp="2026-01-27 12:56:54 +0000 UTC" firstStartedPulling="2026-01-27 12:56:55.667932412 +0000 UTC m=+1842.904960622" lastFinishedPulling="2026-01-27 12:56:58.127752017 +0000 UTC m=+1845.364780227" observedRunningTime="2026-01-27 12:56:59.477855077 +0000 UTC m=+1846.714883297" watchObservedRunningTime="2026-01-27 12:56:59.481232165 +0000 UTC m=+1846.718260375" Jan 27 12:57:07 crc kubenswrapper[4900]: I0127 12:57:07.482674 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:57:07 crc kubenswrapper[4900]: E0127 12:57:07.483729 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:57:07 crc kubenswrapper[4900]: I0127 12:57:07.607242 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" event={"ID":"cb831e66-cd2f-4a9f-8a72-385490b13aa3","Type":"ContainerStarted","Data":"43c847995809adb221ca2bddbea595410d64a0d42629cca4eb8cb13479776177"} Jan 27 12:57:07 crc kubenswrapper[4900]: I0127 12:57:07.947206 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-5ff866d6bc-dnsvf" Jan 27 12:57:07 crc kubenswrapper[4900]: I0127 12:57:07.952627 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-857c4f4785-hvkmw" Jan 27 12:57:07 crc kubenswrapper[4900]: I0127 12:57:07.982976 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" podStartSLOduration=3.212964588 podStartE2EDuration="17.98294436s" podCreationTimestamp="2026-01-27 12:56:50 +0000 UTC" firstStartedPulling="2026-01-27 12:56:52.065402144 +0000 UTC m=+1839.302430354" lastFinishedPulling="2026-01-27 12:57:06.835381916 +0000 UTC m=+1854.072410126" observedRunningTime="2026-01-27 12:57:07.632676988 +0000 UTC m=+1854.869705198" watchObservedRunningTime="2026-01-27 12:57:07.98294436 +0000 UTC m=+1855.219972590" Jan 27 12:57:08 crc kubenswrapper[4900]: I0127 12:57:08.069046 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-799b6c7dd6-cz9kf"] Jan 27 12:57:08 crc kubenswrapper[4900]: I0127 12:57:08.069302 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" podUID="d30b65d6-3363-496a-97c9-8983b5332464" containerName="heat-cfnapi" containerID="cri-o://598bc50f504dcb77d53ea08302261e3f4e53ab9a648ac6488d9735d738e289ac" gracePeriod=60 Jan 27 12:57:08 crc kubenswrapper[4900]: I0127 12:57:08.122336 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-6f85ff8cdd-9lj8l"] Jan 27 12:57:08 crc kubenswrapper[4900]: I0127 12:57:08.122626 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-6f85ff8cdd-9lj8l" podUID="b44caf11-0144-4419-a3fd-49a686e81f0a" containerName="heat-api" containerID="cri-o://48f6f2cbc0d29460a39333c7fa6f45fcf8da6d16bbb2fd0bb2d583c0dc7af4b0" 
gracePeriod=60 Jan 27 12:57:10 crc kubenswrapper[4900]: I0127 12:57:10.728907 4900 generic.go:334] "Generic (PLEG): container finished" podID="f460e552-e35d-44f9-9041-82280a4a840e" containerID="ca567738d061f502e6f62981d61ab17b976f4e10016b543d7fb3d04960c14f10" exitCode=0 Jan 27 12:57:10 crc kubenswrapper[4900]: I0127 12:57:10.729137 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f460e552-e35d-44f9-9041-82280a4a840e","Type":"ContainerDied","Data":"ca567738d061f502e6f62981d61ab17b976f4e10016b543d7fb3d04960c14f10"} Jan 27 12:57:10 crc kubenswrapper[4900]: I0127 12:57:10.738254 4900 generic.go:334] "Generic (PLEG): container finished" podID="b7437170-ba39-40c1-a876-4860f350d1e6" containerID="106f6f040c276a2381460626096442d62ba3252275944cbcaba93b583e5e68e5" exitCode=0 Jan 27 12:57:10 crc kubenswrapper[4900]: I0127 12:57:10.738324 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"b7437170-ba39-40c1-a876-4860f350d1e6","Type":"ContainerDied","Data":"106f6f040c276a2381460626096442d62ba3252275944cbcaba93b583e5e68e5"} Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.312784 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-6f85ff8cdd-9lj8l" podUID="b44caf11-0144-4419-a3fd-49a686e81f0a" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.224:8004/healthcheck\": read tcp 10.217.0.2:60056->10.217.0.224:8004: read: connection reset by peer" Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.575552 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" podUID="d30b65d6-3363-496a-97c9-8983b5332464" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.223:8000/healthcheck\": read tcp 10.217.0.2:45852->10.217.0.223:8000: read: connection reset by peer" Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.769812 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"b7437170-ba39-40c1-a876-4860f350d1e6","Type":"ContainerStarted","Data":"ccb581b3745d0b1333d0af08e1820049e8a56fe95654174dafa1bcb9697a8bdc"} Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.770275 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.777436 4900 generic.go:334] "Generic (PLEG): container finished" podID="b44caf11-0144-4419-a3fd-49a686e81f0a" containerID="48f6f2cbc0d29460a39333c7fa6f45fcf8da6d16bbb2fd0bb2d583c0dc7af4b0" exitCode=0 Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.777517 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6f85ff8cdd-9lj8l" event={"ID":"b44caf11-0144-4419-a3fd-49a686e81f0a","Type":"ContainerDied","Data":"48f6f2cbc0d29460a39333c7fa6f45fcf8da6d16bbb2fd0bb2d583c0dc7af4b0"} Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.782113 4900 generic.go:334] "Generic (PLEG): container finished" podID="d30b65d6-3363-496a-97c9-8983b5332464" containerID="598bc50f504dcb77d53ea08302261e3f4e53ab9a648ac6488d9735d738e289ac" exitCode=0 Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.782196 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" event={"ID":"d30b65d6-3363-496a-97c9-8983b5332464","Type":"ContainerDied","Data":"598bc50f504dcb77d53ea08302261e3f4e53ab9a648ac6488d9735d738e289ac"} Jan 27 12:57:11 crc 
kubenswrapper[4900]: I0127 12:57:11.784272 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f460e552-e35d-44f9-9041-82280a4a840e","Type":"ContainerStarted","Data":"0dea95b5c7b308aede309155312af19c8eb9f5ccff2cf630b397dcfacf7dd302"} Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.784714 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.822801 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=52.822767812 podStartE2EDuration="52.822767812s" podCreationTimestamp="2026-01-27 12:56:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:57:11.806415548 +0000 UTC m=+1859.043443758" watchObservedRunningTime="2026-01-27 12:57:11.822767812 +0000 UTC m=+1859.059796022" Jan 27 12:57:11 crc kubenswrapper[4900]: I0127 12:57:11.849333 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=52.849299612 podStartE2EDuration="52.849299612s" podCreationTimestamp="2026-01-27 12:56:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:57:11.839045395 +0000 UTC m=+1859.076073605" watchObservedRunningTime="2026-01-27 12:57:11.849299612 +0000 UTC m=+1859.086327822" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.004583 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.191322 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data-custom\") pod \"b44caf11-0144-4419-a3fd-49a686e81f0a\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.191784 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-public-tls-certs\") pod \"b44caf11-0144-4419-a3fd-49a686e81f0a\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.192106 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-combined-ca-bundle\") pod \"b44caf11-0144-4419-a3fd-49a686e81f0a\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.193918 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data\") pod \"b44caf11-0144-4419-a3fd-49a686e81f0a\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.193975 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dv7wb\" (UniqueName: \"kubernetes.io/projected/b44caf11-0144-4419-a3fd-49a686e81f0a-kube-api-access-dv7wb\") pod \"b44caf11-0144-4419-a3fd-49a686e81f0a\" (UID: 
\"b44caf11-0144-4419-a3fd-49a686e81f0a\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.194024 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-internal-tls-certs\") pod \"b44caf11-0144-4419-a3fd-49a686e81f0a\" (UID: \"b44caf11-0144-4419-a3fd-49a686e81f0a\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.221149 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b44caf11-0144-4419-a3fd-49a686e81f0a-kube-api-access-dv7wb" (OuterVolumeSpecName: "kube-api-access-dv7wb") pod "b44caf11-0144-4419-a3fd-49a686e81f0a" (UID: "b44caf11-0144-4419-a3fd-49a686e81f0a"). InnerVolumeSpecName "kube-api-access-dv7wb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.291020 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b44caf11-0144-4419-a3fd-49a686e81f0a" (UID: "b44caf11-0144-4419-a3fd-49a686e81f0a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.291409 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b44caf11-0144-4419-a3fd-49a686e81f0a" (UID: "b44caf11-0144-4419-a3fd-49a686e81f0a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.300656 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.300782 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.300916 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dv7wb\" (UniqueName: \"kubernetes.io/projected/b44caf11-0144-4419-a3fd-49a686e81f0a-kube-api-access-dv7wb\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.323439 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data" (OuterVolumeSpecName: "config-data") pod "b44caf11-0144-4419-a3fd-49a686e81f0a" (UID: "b44caf11-0144-4419-a3fd-49a686e81f0a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.361907 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b44caf11-0144-4419-a3fd-49a686e81f0a" (UID: "b44caf11-0144-4419-a3fd-49a686e81f0a"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.371174 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b44caf11-0144-4419-a3fd-49a686e81f0a" (UID: "b44caf11-0144-4419-a3fd-49a686e81f0a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.403563 4900 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.403610 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.403629 4900 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b44caf11-0144-4419-a3fd-49a686e81f0a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.487774 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.616938 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-internal-tls-certs\") pod \"d30b65d6-3363-496a-97c9-8983b5332464\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.617012 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85kmk\" (UniqueName: \"kubernetes.io/projected/d30b65d6-3363-496a-97c9-8983b5332464-kube-api-access-85kmk\") pod \"d30b65d6-3363-496a-97c9-8983b5332464\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.617068 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data\") pod \"d30b65d6-3363-496a-97c9-8983b5332464\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.617172 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data-custom\") pod \"d30b65d6-3363-496a-97c9-8983b5332464\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.617237 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-combined-ca-bundle\") pod \"d30b65d6-3363-496a-97c9-8983b5332464\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.617270 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-public-tls-certs\") pod 
\"d30b65d6-3363-496a-97c9-8983b5332464\" (UID: \"d30b65d6-3363-496a-97c9-8983b5332464\") " Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.625137 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d30b65d6-3363-496a-97c9-8983b5332464" (UID: "d30b65d6-3363-496a-97c9-8983b5332464"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.626321 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d30b65d6-3363-496a-97c9-8983b5332464-kube-api-access-85kmk" (OuterVolumeSpecName: "kube-api-access-85kmk") pod "d30b65d6-3363-496a-97c9-8983b5332464" (UID: "d30b65d6-3363-496a-97c9-8983b5332464"). InnerVolumeSpecName "kube-api-access-85kmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.693277 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d30b65d6-3363-496a-97c9-8983b5332464" (UID: "d30b65d6-3363-496a-97c9-8983b5332464"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.693698 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d30b65d6-3363-496a-97c9-8983b5332464" (UID: "d30b65d6-3363-496a-97c9-8983b5332464"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.694958 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d30b65d6-3363-496a-97c9-8983b5332464" (UID: "d30b65d6-3363-496a-97c9-8983b5332464"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.702372 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data" (OuterVolumeSpecName: "config-data") pod "d30b65d6-3363-496a-97c9-8983b5332464" (UID: "d30b65d6-3363-496a-97c9-8983b5332464"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.722763 4900 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.722826 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85kmk\" (UniqueName: \"kubernetes.io/projected/d30b65d6-3363-496a-97c9-8983b5332464-kube-api-access-85kmk\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.722845 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.722855 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.722868 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.722878 4900 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d30b65d6-3363-496a-97c9-8983b5332464-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.802477 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" event={"ID":"d30b65d6-3363-496a-97c9-8983b5332464","Type":"ContainerDied","Data":"caf1c1201a0bcd4fcacaf2e12b8b46ed183c90ae41e06da2bd6b2282f44aa050"} Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.802552 4900 scope.go:117] "RemoveContainer" containerID="598bc50f504dcb77d53ea08302261e3f4e53ab9a648ac6488d9735d738e289ac" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.802592 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-799b6c7dd6-cz9kf" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.805997 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6f85ff8cdd-9lj8l" event={"ID":"b44caf11-0144-4419-a3fd-49a686e81f0a","Type":"ContainerDied","Data":"3de59d9bb9e1c13d73bc69f0ef5081f6b57b1d8c564441ef6777841d8b4da668"} Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.806200 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-6f85ff8cdd-9lj8l" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.885806 4900 scope.go:117] "RemoveContainer" containerID="48f6f2cbc0d29460a39333c7fa6f45fcf8da6d16bbb2fd0bb2d583c0dc7af4b0" Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.914197 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-799b6c7dd6-cz9kf"] Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.948456 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-799b6c7dd6-cz9kf"] Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.964465 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-6f85ff8cdd-9lj8l"] Jan 27 12:57:12 crc kubenswrapper[4900]: I0127 12:57:12.981046 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-6f85ff8cdd-9lj8l"] Jan 27 12:57:14 crc kubenswrapper[4900]: I0127 12:57:14.496752 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b44caf11-0144-4419-a3fd-49a686e81f0a" path="/var/lib/kubelet/pods/b44caf11-0144-4419-a3fd-49a686e81f0a/volumes" Jan 27 12:57:14 crc kubenswrapper[4900]: I0127 12:57:14.497862 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d30b65d6-3363-496a-97c9-8983b5332464" path="/var/lib/kubelet/pods/d30b65d6-3363-496a-97c9-8983b5332464/volumes" Jan 27 12:57:14 crc kubenswrapper[4900]: I0127 12:57:14.773895 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-555ff797c5-bldkh" Jan 27 12:57:14 crc kubenswrapper[4900]: I0127 12:57:14.831537 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-55dc67899d-kc667"] Jan 27 12:57:14 crc kubenswrapper[4900]: I0127 12:57:14.831860 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-55dc67899d-kc667" podUID="eed3fb3f-18d6-4382-85f1-77235d870e91" containerName="heat-engine" containerID="cri-o://f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" gracePeriod=60 Jan 27 12:57:19 crc kubenswrapper[4900]: E0127 12:57:19.526188 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 27 12:57:19 crc kubenswrapper[4900]: E0127 12:57:19.530454 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 27 12:57:19 crc kubenswrapper[4900]: E0127 12:57:19.535217 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 27 12:57:19 crc kubenswrapper[4900]: E0127 12:57:19.535341 4900 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" 
pod="openstack/heat-engine-55dc67899d-kc667" podUID="eed3fb3f-18d6-4382-85f1-77235d870e91" containerName="heat-engine" Jan 27 12:57:19 crc kubenswrapper[4900]: I0127 12:57:19.917546 4900 generic.go:334] "Generic (PLEG): container finished" podID="cb831e66-cd2f-4a9f-8a72-385490b13aa3" containerID="43c847995809adb221ca2bddbea595410d64a0d42629cca4eb8cb13479776177" exitCode=0 Jan 27 12:57:19 crc kubenswrapper[4900]: I0127 12:57:19.917592 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" event={"ID":"cb831e66-cd2f-4a9f-8a72-385490b13aa3","Type":"ContainerDied","Data":"43c847995809adb221ca2bddbea595410d64a0d42629cca4eb8cb13479776177"} Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.486622 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:57:21 crc kubenswrapper[4900]: E0127 12:57:21.488200 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.677281 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.762824 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-inventory\") pod \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.763048 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-ssh-key-openstack-edpm-ipam\") pod \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.763298 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhc2m\" (UniqueName: \"kubernetes.io/projected/cb831e66-cd2f-4a9f-8a72-385490b13aa3-kube-api-access-bhc2m\") pod \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.763370 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-repo-setup-combined-ca-bundle\") pod \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\" (UID: \"cb831e66-cd2f-4a9f-8a72-385490b13aa3\") " Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.770805 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb831e66-cd2f-4a9f-8a72-385490b13aa3-kube-api-access-bhc2m" (OuterVolumeSpecName: "kube-api-access-bhc2m") pod "cb831e66-cd2f-4a9f-8a72-385490b13aa3" (UID: "cb831e66-cd2f-4a9f-8a72-385490b13aa3"). InnerVolumeSpecName "kube-api-access-bhc2m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.777098 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "cb831e66-cd2f-4a9f-8a72-385490b13aa3" (UID: "cb831e66-cd2f-4a9f-8a72-385490b13aa3"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.809620 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-inventory" (OuterVolumeSpecName: "inventory") pod "cb831e66-cd2f-4a9f-8a72-385490b13aa3" (UID: "cb831e66-cd2f-4a9f-8a72-385490b13aa3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.861873 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "cb831e66-cd2f-4a9f-8a72-385490b13aa3" (UID: "cb831e66-cd2f-4a9f-8a72-385490b13aa3"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.867619 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.867668 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhc2m\" (UniqueName: \"kubernetes.io/projected/cb831e66-cd2f-4a9f-8a72-385490b13aa3-kube-api-access-bhc2m\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.867682 4900 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.867696 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb831e66-cd2f-4a9f-8a72-385490b13aa3-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.949510 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" event={"ID":"cb831e66-cd2f-4a9f-8a72-385490b13aa3","Type":"ContainerDied","Data":"f299ad2574cabd51918f0cb645759a2ffb2b62ccfcdca535abc9a919415f4913"} Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.949622 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f299ad2574cabd51918f0cb645759a2ffb2b62ccfcdca535abc9a919415f4913" Jan 27 12:57:21 crc kubenswrapper[4900]: I0127 12:57:21.949751 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.079831 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv"] Jan 27 12:57:22 crc kubenswrapper[4900]: E0127 12:57:22.080677 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb831e66-cd2f-4a9f-8a72-385490b13aa3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.080708 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb831e66-cd2f-4a9f-8a72-385490b13aa3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 27 12:57:22 crc kubenswrapper[4900]: E0127 12:57:22.080779 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d30b65d6-3363-496a-97c9-8983b5332464" containerName="heat-cfnapi" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.080789 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d30b65d6-3363-496a-97c9-8983b5332464" containerName="heat-cfnapi" Jan 27 12:57:22 crc kubenswrapper[4900]: E0127 12:57:22.080806 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b44caf11-0144-4419-a3fd-49a686e81f0a" containerName="heat-api" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.080824 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b44caf11-0144-4419-a3fd-49a686e81f0a" containerName="heat-api" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.081267 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb831e66-cd2f-4a9f-8a72-385490b13aa3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.081299 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b44caf11-0144-4419-a3fd-49a686e81f0a" containerName="heat-api" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.081332 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d30b65d6-3363-496a-97c9-8983b5332464" containerName="heat-cfnapi" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.082840 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.091762 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.091828 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.092045 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.092127 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.115648 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv"] Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.176467 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vh2c\" (UniqueName: \"kubernetes.io/projected/1cfbfd74-58ba-4625-a563-624a0e53d3c2-kube-api-access-8vh2c\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.176557 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.177160 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.300553 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vh2c\" (UniqueName: \"kubernetes.io/projected/1cfbfd74-58ba-4625-a563-624a0e53d3c2-kube-api-access-8vh2c\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.300799 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.301285 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-ssh-key-openstack-edpm-ipam\") pod 
\"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.310464 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.318144 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.327331 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vh2c\" (UniqueName: \"kubernetes.io/projected/1cfbfd74-58ba-4625-a563-624a0e53d3c2-kube-api-access-8vh2c\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbhqv\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:22 crc kubenswrapper[4900]: I0127 12:57:22.427243 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.093153 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv"] Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.412683 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-vbjbt"] Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.508402 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-vbjbt"] Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.651608 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-g6snh"] Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.654602 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.665386 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.693406 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-g6snh"] Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.809040 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-combined-ca-bundle\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.809675 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glgpx\" (UniqueName: \"kubernetes.io/projected/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-kube-api-access-glgpx\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.809768 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-scripts\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.809826 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-config-data\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.912699 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-config-data\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.912816 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-combined-ca-bundle\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.912967 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glgpx\" (UniqueName: \"kubernetes.io/projected/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-kube-api-access-glgpx\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.913032 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-scripts\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.917420 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-scripts\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.918213 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-config-data\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.940498 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glgpx\" (UniqueName: \"kubernetes.io/projected/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-kube-api-access-glgpx\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.945366 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-combined-ca-bundle\") pod \"aodh-db-sync-g6snh\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.983671 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" event={"ID":"1cfbfd74-58ba-4625-a563-624a0e53d3c2","Type":"ContainerStarted","Data":"bf0ddbc6b531885bef5062f3759b1bb1d5d2b33b04a795631882dd7115edb432"} Jan 27 12:57:23 crc kubenswrapper[4900]: I0127 12:57:23.987144 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:24 crc kubenswrapper[4900]: I0127 12:57:24.509425 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e22dfd1e-8efb-4309-a859-79f256e6eb78" path="/var/lib/kubelet/pods/e22dfd1e-8efb-4309-a859-79f256e6eb78/volumes" Jan 27 12:57:24 crc kubenswrapper[4900]: I0127 12:57:24.578271 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-g6snh"] Jan 27 12:57:25 crc kubenswrapper[4900]: I0127 12:57:25.008728 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-g6snh" event={"ID":"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b","Type":"ContainerStarted","Data":"c68fa4028a9523d21b10077a2cd3cc8aea9ec55e88f8156e5958edf0a69a612b"} Jan 27 12:57:25 crc kubenswrapper[4900]: I0127 12:57:25.011293 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" event={"ID":"1cfbfd74-58ba-4625-a563-624a0e53d3c2","Type":"ContainerStarted","Data":"b775075df3d3341a0093f0f42bc2a9b6e489d8e6fcdc6d9177a7e1b1061b7d34"} Jan 27 12:57:25 crc kubenswrapper[4900]: I0127 12:57:25.033236 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" podStartSLOduration=2.248558228 podStartE2EDuration="3.033212523s" podCreationTimestamp="2026-01-27 12:57:22 +0000 UTC" firstStartedPulling="2026-01-27 12:57:23.115147681 +0000 UTC m=+1870.352175891" lastFinishedPulling="2026-01-27 12:57:23.899801976 +0000 UTC m=+1871.136830186" observedRunningTime="2026-01-27 12:57:25.031380479 +0000 UTC m=+1872.268408689" watchObservedRunningTime="2026-01-27 12:57:25.033212523 +0000 UTC m=+1872.270240733" Jan 27 12:57:28 crc kubenswrapper[4900]: I0127 12:57:28.064974 4900 
generic.go:334] "Generic (PLEG): container finished" podID="1cfbfd74-58ba-4625-a563-624a0e53d3c2" containerID="b775075df3d3341a0093f0f42bc2a9b6e489d8e6fcdc6d9177a7e1b1061b7d34" exitCode=0 Jan 27 12:57:28 crc kubenswrapper[4900]: I0127 12:57:28.065504 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" event={"ID":"1cfbfd74-58ba-4625-a563-624a0e53d3c2","Type":"ContainerDied","Data":"b775075df3d3341a0093f0f42bc2a9b6e489d8e6fcdc6d9177a7e1b1061b7d34"} Jan 27 12:57:29 crc kubenswrapper[4900]: E0127 12:57:29.531932 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef is running failed: container process not found" containerID="f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 27 12:57:29 crc kubenswrapper[4900]: E0127 12:57:29.533234 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef is running failed: container process not found" containerID="f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 27 12:57:29 crc kubenswrapper[4900]: E0127 12:57:29.538276 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef is running failed: container process not found" containerID="f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 27 12:57:29 crc kubenswrapper[4900]: E0127 12:57:29.538382 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef is running failed: container process not found" probeType="Readiness" pod="openstack/heat-engine-55dc67899d-kc667" podUID="eed3fb3f-18d6-4382-85f1-77235d870e91" containerName="heat-engine" Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.139131 4900 generic.go:334] "Generic (PLEG): container finished" podID="eed3fb3f-18d6-4382-85f1-77235d870e91" containerID="f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" exitCode=0 Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.139292 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-55dc67899d-kc667" event={"ID":"eed3fb3f-18d6-4382-85f1-77235d870e91","Type":"ContainerDied","Data":"f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef"} Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.678280 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.718663 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.842374 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.885888 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.892390 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.985825 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data-custom\") pod \"eed3fb3f-18d6-4382-85f1-77235d870e91\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.986181 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data\") pod \"eed3fb3f-18d6-4382-85f1-77235d870e91\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.986397 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-combined-ca-bundle\") pod \"eed3fb3f-18d6-4382-85f1-77235d870e91\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.986417 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-inventory\") pod \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.986481 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhzqx\" (UniqueName: \"kubernetes.io/projected/eed3fb3f-18d6-4382-85f1-77235d870e91-kube-api-access-zhzqx\") pod \"eed3fb3f-18d6-4382-85f1-77235d870e91\" (UID: \"eed3fb3f-18d6-4382-85f1-77235d870e91\") " Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.986544 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vh2c\" (UniqueName: \"kubernetes.io/projected/1cfbfd74-58ba-4625-a563-624a0e53d3c2-kube-api-access-8vh2c\") pod \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " Jan 27 12:57:30 crc kubenswrapper[4900]: I0127 12:57:30.986632 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-ssh-key-openstack-edpm-ipam\") pod \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\" (UID: \"1cfbfd74-58ba-4625-a563-624a0e53d3c2\") " Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.000235 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "eed3fb3f-18d6-4382-85f1-77235d870e91" (UID: "eed3fb3f-18d6-4382-85f1-77235d870e91"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.010701 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eed3fb3f-18d6-4382-85f1-77235d870e91-kube-api-access-zhzqx" (OuterVolumeSpecName: "kube-api-access-zhzqx") pod "eed3fb3f-18d6-4382-85f1-77235d870e91" (UID: "eed3fb3f-18d6-4382-85f1-77235d870e91"). InnerVolumeSpecName "kube-api-access-zhzqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.011285 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cfbfd74-58ba-4625-a563-624a0e53d3c2-kube-api-access-8vh2c" (OuterVolumeSpecName: "kube-api-access-8vh2c") pod "1cfbfd74-58ba-4625-a563-624a0e53d3c2" (UID: "1cfbfd74-58ba-4625-a563-624a0e53d3c2"). InnerVolumeSpecName "kube-api-access-8vh2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.071594 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1cfbfd74-58ba-4625-a563-624a0e53d3c2" (UID: "1cfbfd74-58ba-4625-a563-624a0e53d3c2"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.095484 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhzqx\" (UniqueName: \"kubernetes.io/projected/eed3fb3f-18d6-4382-85f1-77235d870e91-kube-api-access-zhzqx\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.095531 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vh2c\" (UniqueName: \"kubernetes.io/projected/1cfbfd74-58ba-4625-a563-624a0e53d3c2-kube-api-access-8vh2c\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.095549 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.095563 4900 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.120861 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data" (OuterVolumeSpecName: "config-data") pod "eed3fb3f-18d6-4382-85f1-77235d870e91" (UID: "eed3fb3f-18d6-4382-85f1-77235d870e91"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.153789 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eed3fb3f-18d6-4382-85f1-77235d870e91" (UID: "eed3fb3f-18d6-4382-85f1-77235d870e91"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.153903 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-inventory" (OuterVolumeSpecName: "inventory") pod "1cfbfd74-58ba-4625-a563-624a0e53d3c2" (UID: "1cfbfd74-58ba-4625-a563-624a0e53d3c2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.174816 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-g6snh" event={"ID":"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b","Type":"ContainerStarted","Data":"2650982d4f0073af8e6f563035a4cd99683b2d48615435fd802cea1d651a7826"} Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.178497 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" event={"ID":"1cfbfd74-58ba-4625-a563-624a0e53d3c2","Type":"ContainerDied","Data":"bf0ddbc6b531885bef5062f3759b1bb1d5d2b33b04a795631882dd7115edb432"} Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.178539 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf0ddbc6b531885bef5062f3759b1bb1d5d2b33b04a795631882dd7115edb432" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.178589 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbhqv" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.190466 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-55dc67899d-kc667" event={"ID":"eed3fb3f-18d6-4382-85f1-77235d870e91","Type":"ContainerDied","Data":"4c2f59662c3f382fab739515386eb317d39939dfc44cb92a96b732159b02746e"} Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.190557 4900 scope.go:117] "RemoveContainer" containerID="f4639689550f0931c35a69fb5284d4ec392f3257e6f2648e457bfeb00a81d1ef" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.190841 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-55dc67899d-kc667" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.198202 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.198238 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfbfd74-58ba-4625-a563-624a0e53d3c2-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.198248 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eed3fb3f-18d6-4382-85f1-77235d870e91-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.216753 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-g6snh" podStartSLOduration=2.076429149 podStartE2EDuration="8.216728576s" podCreationTimestamp="2026-01-27 12:57:23 +0000 UTC" firstStartedPulling="2026-01-27 12:57:24.546211588 +0000 UTC m=+1871.783239798" lastFinishedPulling="2026-01-27 12:57:30.686511015 +0000 UTC m=+1877.923539225" observedRunningTime="2026-01-27 12:57:31.204657035 +0000 UTC m=+1878.441685245" watchObservedRunningTime="2026-01-27 12:57:31.216728576 +0000 UTC m=+1878.453756786" Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.320967 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-55dc67899d-kc667"] Jan 27 12:57:31 crc kubenswrapper[4900]: I0127 12:57:31.346047 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-55dc67899d-kc667"] Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.065982 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m"] Jan 27 12:57:32 crc kubenswrapper[4900]: E0127 12:57:32.067201 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cfbfd74-58ba-4625-a563-624a0e53d3c2" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.067226 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cfbfd74-58ba-4625-a563-624a0e53d3c2" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 27 12:57:32 crc kubenswrapper[4900]: E0127 12:57:32.067273 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eed3fb3f-18d6-4382-85f1-77235d870e91" containerName="heat-engine" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.067282 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="eed3fb3f-18d6-4382-85f1-77235d870e91" containerName="heat-engine" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.067596 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cfbfd74-58ba-4625-a563-624a0e53d3c2" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.067637 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="eed3fb3f-18d6-4382-85f1-77235d870e91" containerName="heat-engine" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.068870 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.072730 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.073002 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.073046 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.073208 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.077886 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m"] Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.147354 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.147438 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rnbb\" (UniqueName: \"kubernetes.io/projected/53908aae-9e96-453f-91e6-f17eeb2ce37a-kube-api-access-2rnbb\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.147705 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.147755 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.250639 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.250907 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.251323 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.251519 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rnbb\" (UniqueName: \"kubernetes.io/projected/53908aae-9e96-453f-91e6-f17eeb2ce37a-kube-api-access-2rnbb\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.260886 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.274733 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.298861 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.299629 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rnbb\" (UniqueName: \"kubernetes.io/projected/53908aae-9e96-453f-91e6-f17eeb2ce37a-kube-api-access-2rnbb\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.482581 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 12:57:32 crc kubenswrapper[4900]: I0127 12:57:32.503224 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eed3fb3f-18d6-4382-85f1-77235d870e91" path="/var/lib/kubelet/pods/eed3fb3f-18d6-4382-85f1-77235d870e91/volumes" Jan 27 12:57:33 crc kubenswrapper[4900]: I0127 12:57:33.482300 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:57:33 crc kubenswrapper[4900]: E0127 12:57:33.483538 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:57:33 crc kubenswrapper[4900]: I0127 12:57:33.564293 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m"] Jan 27 12:57:33 crc kubenswrapper[4900]: W0127 12:57:33.564714 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53908aae_9e96_453f_91e6_f17eeb2ce37a.slice/crio-5a864bfa023cb6636fe15bc661df150cdc8a77a9454a58ff8d00452e35fef37c WatchSource:0}: Error finding container 5a864bfa023cb6636fe15bc661df150cdc8a77a9454a58ff8d00452e35fef37c: Status 404 returned error can't find the container with id 5a864bfa023cb6636fe15bc661df150cdc8a77a9454a58ff8d00452e35fef37c Jan 27 12:57:34 crc kubenswrapper[4900]: I0127 12:57:34.237808 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" event={"ID":"53908aae-9e96-453f-91e6-f17eeb2ce37a","Type":"ContainerStarted","Data":"5a864bfa023cb6636fe15bc661df150cdc8a77a9454a58ff8d00452e35fef37c"} Jan 27 12:57:35 crc kubenswrapper[4900]: I0127 12:57:35.252715 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" event={"ID":"53908aae-9e96-453f-91e6-f17eeb2ce37a","Type":"ContainerStarted","Data":"da77867d6f93fdc8ee8ab73693838c98c0ff5f9274c979dac3724997daf40f95"} Jan 27 12:57:35 crc kubenswrapper[4900]: I0127 12:57:35.256481 4900 generic.go:334] "Generic (PLEG): container finished" podID="a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" containerID="2650982d4f0073af8e6f563035a4cd99683b2d48615435fd802cea1d651a7826" exitCode=0 Jan 27 12:57:35 crc kubenswrapper[4900]: I0127 12:57:35.256618 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-g6snh" event={"ID":"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b","Type":"ContainerDied","Data":"2650982d4f0073af8e6f563035a4cd99683b2d48615435fd802cea1d651a7826"} Jan 27 12:57:35 crc kubenswrapper[4900]: I0127 12:57:35.296456 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" podStartSLOduration=2.814488985 podStartE2EDuration="3.296425121s" podCreationTimestamp="2026-01-27 12:57:32 +0000 UTC" firstStartedPulling="2026-01-27 12:57:33.569145745 +0000 UTC m=+1880.806173955" lastFinishedPulling="2026-01-27 12:57:34.051081871 +0000 UTC m=+1881.288110091" observedRunningTime="2026-01-27 12:57:35.269806957 +0000 UTC m=+1882.506835157" 
watchObservedRunningTime="2026-01-27 12:57:35.296425121 +0000 UTC m=+1882.533453331" Jan 27 12:57:36 crc kubenswrapper[4900]: I0127 12:57:36.157527 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-1" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerName="rabbitmq" containerID="cri-o://05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d" gracePeriod=604795 Jan 27 12:57:36 crc kubenswrapper[4900]: I0127 12:57:36.191023 4900 scope.go:117] "RemoveContainer" containerID="9de16403e647a789a56826b863a8d06a397726a07eaee105988d975a31657665" Jan 27 12:57:36 crc kubenswrapper[4900]: I0127 12:57:36.831694 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:36 crc kubenswrapper[4900]: I0127 12:57:36.986130 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-combined-ca-bundle\") pod \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " Jan 27 12:57:36 crc kubenswrapper[4900]: I0127 12:57:36.986174 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-scripts\") pod \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " Jan 27 12:57:36 crc kubenswrapper[4900]: I0127 12:57:36.986332 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-config-data\") pod \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " Jan 27 12:57:36 crc kubenswrapper[4900]: I0127 12:57:36.986377 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glgpx\" (UniqueName: \"kubernetes.io/projected/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-kube-api-access-glgpx\") pod \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\" (UID: \"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b\") " Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.001605 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-kube-api-access-glgpx" (OuterVolumeSpecName: "kube-api-access-glgpx") pod "a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" (UID: "a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b"). InnerVolumeSpecName "kube-api-access-glgpx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.012888 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-scripts" (OuterVolumeSpecName: "scripts") pod "a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" (UID: "a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.035710 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-config-data" (OuterVolumeSpecName: "config-data") pod "a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" (UID: "a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.038418 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" (UID: "a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.090488 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.090553 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.090566 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.090581 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glgpx\" (UniqueName: \"kubernetes.io/projected/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b-kube-api-access-glgpx\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.285869 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-g6snh" event={"ID":"a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b","Type":"ContainerDied","Data":"c68fa4028a9523d21b10077a2cd3cc8aea9ec55e88f8156e5958edf0a69a612b"} Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.285910 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c68fa4028a9523d21b10077a2cd3cc8aea9ec55e88f8156e5958edf0a69a612b" Jan 27 12:57:37 crc kubenswrapper[4900]: I0127 12:57:37.285974 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-g6snh" Jan 27 12:57:38 crc kubenswrapper[4900]: I0127 12:57:38.839172 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 27 12:57:38 crc kubenswrapper[4900]: I0127 12:57:38.841137 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-api" containerID="cri-o://51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4" gracePeriod=30 Jan 27 12:57:38 crc kubenswrapper[4900]: I0127 12:57:38.841165 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-listener" containerID="cri-o://5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce" gracePeriod=30 Jan 27 12:57:38 crc kubenswrapper[4900]: I0127 12:57:38.841173 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-notifier" containerID="cri-o://1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938" gracePeriod=30 Jan 27 12:57:38 crc kubenswrapper[4900]: I0127 12:57:38.841184 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-evaluator" containerID="cri-o://8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c" gracePeriod=30 Jan 27 12:57:39 crc kubenswrapper[4900]: I0127 12:57:39.315371 4900 generic.go:334] "Generic (PLEG): container finished" podID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerID="51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4" exitCode=0 Jan 27 12:57:39 crc kubenswrapper[4900]: I0127 12:57:39.315445 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerDied","Data":"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4"} Jan 27 12:57:40 crc kubenswrapper[4900]: I0127 12:57:40.331829 4900 generic.go:334] "Generic (PLEG): container finished" podID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerID="8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c" exitCode=0 Jan 27 12:57:40 crc kubenswrapper[4900]: I0127 12:57:40.332148 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerDied","Data":"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c"} Jan 27 12:57:40 crc kubenswrapper[4900]: I0127 12:57:40.896445 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.092688 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.098344 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.296133 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-server-conf\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.296214 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-erlang-cookie-secret\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.296336 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-scripts\") pod \"8cea539e-f7a9-4e80-bc7b-1645865568ba\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.296390 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-pod-info\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297037 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297108 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-erlang-cookie\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297200 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-config-data\") pod \"8cea539e-f7a9-4e80-bc7b-1645865568ba\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297256 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-plugins\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297372 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksvfv\" (UniqueName: \"kubernetes.io/projected/8cea539e-f7a9-4e80-bc7b-1645865568ba-kube-api-access-ksvfv\") pod \"8cea539e-f7a9-4e80-bc7b-1645865568ba\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297441 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-config-data\") pod 
\"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297497 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-confd\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297530 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-combined-ca-bundle\") pod \"8cea539e-f7a9-4e80-bc7b-1645865568ba\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297569 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-internal-tls-certs\") pod \"8cea539e-f7a9-4e80-bc7b-1645865568ba\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297609 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7db8t\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-kube-api-access-7db8t\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297654 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-public-tls-certs\") pod \"8cea539e-f7a9-4e80-bc7b-1645865568ba\" (UID: \"8cea539e-f7a9-4e80-bc7b-1645865568ba\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297744 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-tls\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.297773 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-plugins-conf\") pod \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\" (UID: \"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5\") " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.301040 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.307044 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.307735 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-pod-info" (OuterVolumeSpecName: "pod-info") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.311044 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea539e-f7a9-4e80-bc7b-1645865568ba-kube-api-access-ksvfv" (OuterVolumeSpecName: "kube-api-access-ksvfv") pod "8cea539e-f7a9-4e80-bc7b-1645865568ba" (UID: "8cea539e-f7a9-4e80-bc7b-1645865568ba"). InnerVolumeSpecName "kube-api-access-ksvfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.313454 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.314018 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.321345 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-scripts" (OuterVolumeSpecName: "scripts") pod "8cea539e-f7a9-4e80-bc7b-1645865568ba" (UID: "8cea539e-f7a9-4e80-bc7b-1645865568ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.322581 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.322989 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-kube-api-access-7db8t" (OuterVolumeSpecName: "kube-api-access-7db8t") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "kube-api-access-7db8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.352529 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-config-data" (OuterVolumeSpecName: "config-data") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.386392 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20" (OuterVolumeSpecName: "persistence") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419502 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") on node \"crc\" " Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419555 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419574 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419596 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksvfv\" (UniqueName: \"kubernetes.io/projected/8cea539e-f7a9-4e80-bc7b-1645865568ba-kube-api-access-ksvfv\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419610 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419628 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7db8t\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-kube-api-access-7db8t\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419650 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419664 4900 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419675 4900 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419687 4900 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-scripts\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.419698 4900 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-pod-info\") on node \"crc\" DevicePath \"\"" Jan 27 
12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.512257 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.512663 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20") on node "crc" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.514171 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-server-conf" (OuterVolumeSpecName: "server-conf") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.517302 4900 generic.go:334] "Generic (PLEG): container finished" podID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerID="05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d" exitCode=0 Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.517464 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5","Type":"ContainerDied","Data":"05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d"} Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.517509 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5","Type":"ContainerDied","Data":"6fd592117ab6e67e028bf454492a86764928942a9bec21c9c70fc056bf93fc9b"} Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.517532 4900 scope.go:117] "RemoveContainer" containerID="05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.517945 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.524771 4900 generic.go:334] "Generic (PLEG): container finished" podID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerID="5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce" exitCode=0 Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.524809 4900 generic.go:334] "Generic (PLEG): container finished" podID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerID="1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938" exitCode=0 Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.524836 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerDied","Data":"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce"} Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.524870 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerDied","Data":"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938"} Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.524881 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8cea539e-f7a9-4e80-bc7b-1645865568ba","Type":"ContainerDied","Data":"b652040978129f123b3f14b3f7d95cdb79944e53b79156c3a609174af7ddbaa1"} Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.524983 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.533623 4900 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-server-conf\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.533654 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.546301 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8cea539e-f7a9-4e80-bc7b-1645865568ba" (UID: "8cea539e-f7a9-4e80-bc7b-1645865568ba"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.554182 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8cea539e-f7a9-4e80-bc7b-1645865568ba" (UID: "8cea539e-f7a9-4e80-bc7b-1645865568ba"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.592098 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8cea539e-f7a9-4e80-bc7b-1645865568ba" (UID: "8cea539e-f7a9-4e80-bc7b-1645865568ba"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.595289 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" (UID: "b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.628542 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-config-data" (OuterVolumeSpecName: "config-data") pod "8cea539e-f7a9-4e80-bc7b-1645865568ba" (UID: "8cea539e-f7a9-4e80-bc7b-1645865568ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.637253 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.637293 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.637304 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.637315 4900 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.637324 4900 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8cea539e-f7a9-4e80-bc7b-1645865568ba-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.748434 4900 scope.go:117] "RemoveContainer" containerID="6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.802106 4900 scope.go:117] "RemoveContainer" containerID="05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d" Jan 27 12:57:43 crc kubenswrapper[4900]: E0127 12:57:43.805344 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d\": container with ID starting with 05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d not found: ID does not exist" containerID="05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.805413 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d"} err="failed to get container status \"05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d\": rpc error: code = NotFound desc = could not find container \"05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d\": 
container with ID starting with 05a50a9a845530e54c98704afd345c088488d5fa3f2d3fe72bec046926ca868d not found: ID does not exist" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.805453 4900 scope.go:117] "RemoveContainer" containerID="6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae" Jan 27 12:57:43 crc kubenswrapper[4900]: E0127 12:57:43.807336 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae\": container with ID starting with 6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae not found: ID does not exist" containerID="6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.807381 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae"} err="failed to get container status \"6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae\": rpc error: code = NotFound desc = could not find container \"6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae\": container with ID starting with 6f1f46121f527e90245df33b315070a32d8b9124af3c0346e9752c25456c84ae not found: ID does not exist" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.807410 4900 scope.go:117] "RemoveContainer" containerID="5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.843849 4900 scope.go:117] "RemoveContainer" containerID="1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.879587 4900 scope.go:117] "RemoveContainer" containerID="8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.906614 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.939192 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.946318 4900 scope.go:117] "RemoveContainer" containerID="51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4" Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.975150 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 27 12:57:43 crc kubenswrapper[4900]: I0127 12:57:43.996104 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.015877 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.016045 4900 scope.go:117] "RemoveContainer" containerID="5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.016917 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerName="setup-container" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.016940 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerName="setup-container" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.016950 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-evaluator" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.016956 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-evaluator" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.016974 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-api" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.016986 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-api" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.017017 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" containerName="aodh-db-sync" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017024 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" containerName="aodh-db-sync" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.017042 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerName="rabbitmq" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017048 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerName="rabbitmq" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.017095 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-notifier" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017101 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-notifier" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.017111 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-listener" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017117 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-listener" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017356 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" containerName="aodh-db-sync" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017370 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" containerName="rabbitmq" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017385 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-api" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017399 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-notifier" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017411 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-evaluator" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.017418 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" containerName="aodh-listener" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.019332 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.020036 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce\": container with ID starting with 5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce not found: ID does not exist" containerID="5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.020944 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce"} err="failed to get container status \"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce\": rpc error: code = NotFound desc = could not find container \"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce\": container with ID starting with 5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce not found: ID does not exist" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.021039 4900 scope.go:117] "RemoveContainer" containerID="1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.023681 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938\": container with ID starting with 1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938 not found: ID does not exist" containerID="1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.023717 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938"} err="failed to get container status \"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938\": rpc error: code = NotFound desc = could not find container \"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938\": container with ID starting with 1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938 not found: ID does not exist" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.023740 4900 scope.go:117] "RemoveContainer" containerID="8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c" Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.029495 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c\": container with ID starting with 8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c not found: ID does not exist" containerID="8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.029567 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c"} err="failed to get container status \"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c\": rpc error: code = NotFound desc = could not find container \"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c\": container with ID starting with 
8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c not found: ID does not exist" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.029639 4900 scope.go:117] "RemoveContainer" containerID="51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.031396 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 27 12:57:44 crc kubenswrapper[4900]: E0127 12:57:44.034832 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4\": container with ID starting with 51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4 not found: ID does not exist" containerID="51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.034897 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4"} err="failed to get container status \"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4\": rpc error: code = NotFound desc = could not find container \"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4\": container with ID starting with 51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4 not found: ID does not exist" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.034934 4900 scope.go:117] "RemoveContainer" containerID="5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.037889 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce"} err="failed to get container status \"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce\": rpc error: code = NotFound desc = could not find container \"5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce\": container with ID starting with 5a4f1ee24ce4635a3962ccca8653e3c6d9c975094e317f0aa603d2f13bd6f6ce not found: ID does not exist" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.037952 4900 scope.go:117] "RemoveContainer" containerID="1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.038763 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938"} err="failed to get container status \"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938\": rpc error: code = NotFound desc = could not find container \"1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938\": container with ID starting with 1dd13012f744483a345a3210efdab4cb2847f3a890c7b79a21597eadafa29938 not found: ID does not exist" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.038796 4900 scope.go:117] "RemoveContainer" containerID="8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.039271 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c"} err="failed to get container status \"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c\": rpc error: code = 
NotFound desc = could not find container \"8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c\": container with ID starting with 8d2c2dbfcd5b408a954544192c339ce3733f7a2cc17a8ebcc921b9ebea19140c not found: ID does not exist" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.039287 4900 scope.go:117] "RemoveContainer" containerID="51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.040522 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4"} err="failed to get container status \"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4\": rpc error: code = NotFound desc = could not find container \"51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4\": container with ID starting with 51823fd12abfa2e0cdb381391d0849ffb50a371aad272e3ea8f122a8b8a7e2d4 not found: ID does not exist" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.040921 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.044624 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.046596 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.050520 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.050750 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.050896 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.051116 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-kgljt" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.070946 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.154882 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8btnc\" (UniqueName: \"kubernetes.io/projected/cf8673eb-231e-4350-891d-c610bf69df5e-kube-api-access-8btnc\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.154944 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-server-conf\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.154977 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155092 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-public-tls-certs\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155131 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-scripts\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155158 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155196 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-config-data\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155241 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155290 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155368 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-internal-tls-certs\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155406 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-pod-info\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155531 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-config-data\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155585 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-confd\") pod 
\"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155657 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155698 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155762 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfdsj\" (UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-kube-api-access-kfdsj\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.155787 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-combined-ca-bundle\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.258314 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.258384 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.258448 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfdsj\" (UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-kube-api-access-kfdsj\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.258476 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-combined-ca-bundle\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.258535 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8btnc\" (UniqueName: \"kubernetes.io/projected/cf8673eb-231e-4350-891d-c610bf69df5e-kube-api-access-8btnc\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 
12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.258570 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-server-conf\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.258895 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.259368 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.259428 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.259550 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-public-tls-certs\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.259586 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-scripts\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.259609 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.259642 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-config-data\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.260245 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-server-conf\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.260784 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.260821 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0e710899f4a916659691db79ca451e0bb173d45a0872809026b858f8d492b434/globalmount\"" pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.260928 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.261224 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.261269 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.261360 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-internal-tls-certs\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.261403 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-pod-info\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.261544 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-config-data\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.261601 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.262955 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-config-data\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.266819 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-scripts\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.267414 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-combined-ca-bundle\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.267447 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-public-tls-certs\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.269022 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-pod-info\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.270720 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.272104 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-internal-tls-certs\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.276558 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.277510 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf8673eb-231e-4350-891d-c610bf69df5e-config-data\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.279590 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.279949 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8btnc\" (UniqueName: \"kubernetes.io/projected/cf8673eb-231e-4350-891d-c610bf69df5e-kube-api-access-8btnc\") pod \"aodh-0\" (UID: \"cf8673eb-231e-4350-891d-c610bf69df5e\") " pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.281949 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfdsj\" 
(UniqueName: \"kubernetes.io/projected/b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc-kube-api-access-kfdsj\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.354306 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-30eebd94-9113-4ef4-b44c-1b9aa9b54e20\") pod \"rabbitmq-server-1\" (UID: \"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc\") " pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.366321 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.370967 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.601198 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea539e-f7a9-4e80-bc7b-1645865568ba" path="/var/lib/kubelet/pods/8cea539e-f7a9-4e80-bc7b-1645865568ba/volumes" Jan 27 12:57:44 crc kubenswrapper[4900]: I0127 12:57:44.602667 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5" path="/var/lib/kubelet/pods/b703ff05-40e9-4e7e-bbf4-463a6e8a9ed5/volumes" Jan 27 12:57:45 crc kubenswrapper[4900]: I0127 12:57:45.090279 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 27 12:57:45 crc kubenswrapper[4900]: I0127 12:57:45.101911 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 27 12:57:45 crc kubenswrapper[4900]: W0127 12:57:45.116477 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4992f59_80b7_46cd_b0a3_f7cc47f5a1bc.slice/crio-b44729469133866456010178b02cd94fb8d22beec5c83fa4c37bc1f98aa82922 WatchSource:0}: Error finding container b44729469133866456010178b02cd94fb8d22beec5c83fa4c37bc1f98aa82922: Status 404 returned error can't find the container with id b44729469133866456010178b02cd94fb8d22beec5c83fa4c37bc1f98aa82922 Jan 27 12:57:45 crc kubenswrapper[4900]: I0127 12:57:45.572630 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cf8673eb-231e-4350-891d-c610bf69df5e","Type":"ContainerStarted","Data":"bbcf0c9499e86477495fa87f4855e1a284065ae9bbb821f4062ed744f960408f"} Jan 27 12:57:45 crc kubenswrapper[4900]: I0127 12:57:45.576423 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc","Type":"ContainerStarted","Data":"b44729469133866456010178b02cd94fb8d22beec5c83fa4c37bc1f98aa82922"} Jan 27 12:57:46 crc kubenswrapper[4900]: I0127 12:57:46.601381 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cf8673eb-231e-4350-891d-c610bf69df5e","Type":"ContainerStarted","Data":"d3ed7ff9f4b6f455e0907bf3caf3c99ad329581ae1b6f8cf95edf1201139cf36"} Jan 27 12:57:47 crc kubenswrapper[4900]: I0127 12:57:47.482706 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:57:47 crc kubenswrapper[4900]: E0127 12:57:47.483613 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 12:57:47 crc kubenswrapper[4900]: I0127 12:57:47.616208 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cf8673eb-231e-4350-891d-c610bf69df5e","Type":"ContainerStarted","Data":"b67546c0455b74b8a1296e906d7a7658899e16e97d8cd58eff0a7d16caf386dc"} Jan 27 12:57:47 crc kubenswrapper[4900]: I0127 12:57:47.617934 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc","Type":"ContainerStarted","Data":"ef07fb7316cc4916cf0245ef8d1109b4294636b9201fa39ed797deb6754c4c9f"} Jan 27 12:57:49 crc kubenswrapper[4900]: I0127 12:57:49.650814 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cf8673eb-231e-4350-891d-c610bf69df5e","Type":"ContainerStarted","Data":"39ef694e76f2051c565a9a2f6dd3d9dd178fe5a7bfa0d056350656eb37d8fac7"} Jan 27 12:57:50 crc kubenswrapper[4900]: I0127 12:57:50.664606 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"cf8673eb-231e-4350-891d-c610bf69df5e","Type":"ContainerStarted","Data":"5c6d73f15bce0e08d7a666088fcab63f182c2cdf987cb2be2839eedcfd17e584"} Jan 27 12:57:50 crc kubenswrapper[4900]: I0127 12:57:50.692854 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.801914966 podStartE2EDuration="7.692829566s" podCreationTimestamp="2026-01-27 12:57:43 +0000 UTC" firstStartedPulling="2026-01-27 12:57:45.102032401 +0000 UTC m=+1892.339060611" lastFinishedPulling="2026-01-27 12:57:49.992947001 +0000 UTC m=+1897.229975211" observedRunningTime="2026-01-27 12:57:50.688995045 +0000 UTC m=+1897.926023255" watchObservedRunningTime="2026-01-27 12:57:50.692829566 +0000 UTC m=+1897.929857776" Jan 27 12:58:03 crc kubenswrapper[4900]: I0127 12:58:03.482404 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 12:58:04 crc kubenswrapper[4900]: I0127 12:58:04.095793 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"14bb46af03aa708f5b56f9d73717659956f8ecfa5ffc85cf276e4cc701eaafcb"} Jan 27 12:58:21 crc kubenswrapper[4900]: I0127 12:58:21.372724 4900 generic.go:334] "Generic (PLEG): container finished" podID="b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc" containerID="ef07fb7316cc4916cf0245ef8d1109b4294636b9201fa39ed797deb6754c4c9f" exitCode=0 Jan 27 12:58:21 crc kubenswrapper[4900]: I0127 12:58:21.372844 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc","Type":"ContainerDied","Data":"ef07fb7316cc4916cf0245ef8d1109b4294636b9201fa39ed797deb6754c4c9f"} Jan 27 12:58:22 crc kubenswrapper[4900]: I0127 12:58:22.394549 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc","Type":"ContainerStarted","Data":"b54024c38b2b8c477444cb4c10dbe4e862cb57c8b1288134a962b4ad4d490dff"} Jan 27 12:58:22 crc kubenswrapper[4900]: I0127 12:58:22.395623 4900 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Jan 27 12:58:22 crc kubenswrapper[4900]: I0127 12:58:22.430351 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=39.430315311 podStartE2EDuration="39.430315311s" podCreationTimestamp="2026-01-27 12:57:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:58:22.420544277 +0000 UTC m=+1929.657572487" watchObservedRunningTime="2026-01-27 12:58:22.430315311 +0000 UTC m=+1929.667343531" Jan 27 12:58:34 crc kubenswrapper[4900]: I0127 12:58:34.370280 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Jan 27 12:58:34 crc kubenswrapper[4900]: I0127 12:58:34.432448 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 27 12:58:36 crc kubenswrapper[4900]: I0127 12:58:36.404213 4900 scope.go:117] "RemoveContainer" containerID="a13aabe46ae25a7b0d20ffcbc0d5601f89d5f2e811229b05c594e23f610f2c81" Jan 27 12:58:40 crc kubenswrapper[4900]: I0127 12:58:40.310349 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="rabbitmq" containerID="cri-o://60f7d34b6f754639110844ff04c74d1c3d674caa3b8cc7ec2a926c024f9d5de9" gracePeriod=604795 Jan 27 12:58:41 crc kubenswrapper[4900]: I0127 12:58:41.048186 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.135952 4900 generic.go:334] "Generic (PLEG): container finished" podID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerID="60f7d34b6f754639110844ff04c74d1c3d674caa3b8cc7ec2a926c024f9d5de9" exitCode=0 Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.136077 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ca415683-2d53-4bdc-b9f7-c98610a65cc3","Type":"ContainerDied","Data":"60f7d34b6f754639110844ff04c74d1c3d674caa3b8cc7ec2a926c024f9d5de9"} Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.309727 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.427684 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-server-conf\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.428503 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-plugins\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.428679 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca415683-2d53-4bdc-b9f7-c98610a65cc3-pod-info\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.428769 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpp2r\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-kube-api-access-jpp2r\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.428943 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-erlang-cookie\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.429031 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-plugins-conf\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.429152 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.429484 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-confd\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.429764 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca415683-2d53-4bdc-b9f7-c98610a65cc3-erlang-cookie-secret\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.429953 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-tls\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.434871 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.430379 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.430516 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.436119 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/ca415683-2d53-4bdc-b9f7-c98610a65cc3-pod-info" (OuterVolumeSpecName: "pod-info") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.436171 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-config-data\") pod \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\" (UID: \"ca415683-2d53-4bdc-b9f7-c98610a65cc3\") " Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.440176 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.440203 4900 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca415683-2d53-4bdc-b9f7-c98610a65cc3-pod-info\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.440220 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.440259 4900 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.460236 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-kube-api-access-jpp2r" (OuterVolumeSpecName: "kube-api-access-jpp2r") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "kube-api-access-jpp2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.466286 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca415683-2d53-4bdc-b9f7-c98610a65cc3-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.473250 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-config-data" (OuterVolumeSpecName: "config-data") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.499113 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.555838 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.555869 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpp2r\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-kube-api-access-jpp2r\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.555882 4900 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca415683-2d53-4bdc-b9f7-c98610a65cc3-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.555890 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.581886 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-server-conf" (OuterVolumeSpecName: "server-conf") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.666101 4900 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca415683-2d53-4bdc-b9f7-c98610a65cc3-server-conf\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.864345 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.875741 4900 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca415683-2d53-4bdc-b9f7-c98610a65cc3-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.944956 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda" (OuterVolumeSpecName: "persistence") pod "ca415683-2d53-4bdc-b9f7-c98610a65cc3" (UID: "ca415683-2d53-4bdc-b9f7-c98610a65cc3"). InnerVolumeSpecName "pvc-0f579265-8309-4424-805b-8013143bfeda". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 27 12:58:47 crc kubenswrapper[4900]: I0127 12:58:47.978663 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") on node \"crc\" " Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.039306 4900 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.039488 4900 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-0f579265-8309-4424-805b-8013143bfeda" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda") on node "crc" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.082580 4900 reconciler_common.go:293] "Volume detached for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") on node \"crc\" DevicePath \"\"" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.155940 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ca415683-2d53-4bdc-b9f7-c98610a65cc3","Type":"ContainerDied","Data":"62c7fd063d406e834bf1a8b45c1b1433520dbfe57cabcad90a0bac14c0612e6e"} Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.156454 4900 scope.go:117] "RemoveContainer" containerID="60f7d34b6f754639110844ff04c74d1c3d674caa3b8cc7ec2a926c024f9d5de9" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.156700 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.218918 4900 scope.go:117] "RemoveContainer" containerID="4a96193197e852909fdf2ce6bdab6d9377bee4d8f7c787ddc670fdc082daea92" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.235634 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.266644 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.292457 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 27 12:58:48 crc kubenswrapper[4900]: E0127 12:58:48.293168 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="setup-container" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.293191 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="setup-container" Jan 27 12:58:48 crc kubenswrapper[4900]: E0127 12:58:48.293240 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="rabbitmq" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.293247 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="rabbitmq" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.293571 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" containerName="rabbitmq" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.297145 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.324895 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.496354 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca415683-2d53-4bdc-b9f7-c98610a65cc3" path="/var/lib/kubelet/pods/ca415683-2d53-4bdc-b9f7-c98610a65cc3/volumes" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497388 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497426 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmlrc\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-kube-api-access-mmlrc\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497589 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d90fa5d6-40e8-4d00-a517-259b0b16f186-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497620 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-config-data\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497688 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497731 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497752 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d90fa5d6-40e8-4d00-a517-259b0b16f186-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497828 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: 
\"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497917 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.497957 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.498105 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.600652 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.600750 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.600791 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.600860 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.600888 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmlrc\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-kube-api-access-mmlrc\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.601099 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d90fa5d6-40e8-4d00-a517-259b0b16f186-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.601149 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-config-data\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.601322 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.601388 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.601419 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d90fa5d6-40e8-4d00-a517-259b0b16f186-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.601468 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.602143 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.603469 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.605005 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-config-data\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.608400 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.611385 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d90fa5d6-40e8-4d00-a517-259b0b16f186-plugins-conf\") pod \"rabbitmq-server-0\" (UID: 
\"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.612884 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.615137 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d90fa5d6-40e8-4d00-a517-259b0b16f186-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.616003 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d90fa5d6-40e8-4d00-a517-259b0b16f186-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.619656 4900 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.619707 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/30662d0a0a67567137a827b81cb6827625c105288f56960d31edd50967af9ef2/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.619975 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.627198 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmlrc\" (UniqueName: \"kubernetes.io/projected/d90fa5d6-40e8-4d00-a517-259b0b16f186-kube-api-access-mmlrc\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.707720 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0f579265-8309-4424-805b-8013143bfeda\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0f579265-8309-4424-805b-8013143bfeda\") pod \"rabbitmq-server-0\" (UID: \"d90fa5d6-40e8-4d00-a517-259b0b16f186\") " pod="openstack/rabbitmq-server-0" Jan 27 12:58:48 crc kubenswrapper[4900]: I0127 12:58:48.937965 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 27 12:58:50 crc kubenswrapper[4900]: I0127 12:58:50.068785 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 27 12:58:50 crc kubenswrapper[4900]: I0127 12:58:50.222776 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d90fa5d6-40e8-4d00-a517-259b0b16f186","Type":"ContainerStarted","Data":"c5cd2daccb1172df785acee83294d511237860dc93bf153076da9fb67dfa709c"} Jan 27 12:58:52 crc kubenswrapper[4900]: I0127 12:58:52.249007 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d90fa5d6-40e8-4d00-a517-259b0b16f186","Type":"ContainerStarted","Data":"22c808cc03728319b8c119b465f2decb11c3510ad52db0ea64497969c495ebd1"} Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.388254 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4fjzb"] Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.391984 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.407314 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4fjzb"] Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.413804 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-catalog-content\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.414125 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxxb7\" (UniqueName: \"kubernetes.io/projected/2e4a99ab-d092-4834-9be6-77db4083935f-kube-api-access-rxxb7\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.414399 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-utilities\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.517676 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-catalog-content\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.517832 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxxb7\" (UniqueName: \"kubernetes.io/projected/2e4a99ab-d092-4834-9be6-77db4083935f-kube-api-access-rxxb7\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.517968 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-utilities\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.519092 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-utilities\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.519188 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-catalog-content\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.551102 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxxb7\" (UniqueName: \"kubernetes.io/projected/2e4a99ab-d092-4834-9be6-77db4083935f-kube-api-access-rxxb7\") pod \"redhat-operators-4fjzb\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:18 crc kubenswrapper[4900]: I0127 12:59:18.752429 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:19 crc kubenswrapper[4900]: I0127 12:59:19.402902 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4fjzb"] Jan 27 12:59:20 crc kubenswrapper[4900]: I0127 12:59:20.014802 4900 generic.go:334] "Generic (PLEG): container finished" podID="2e4a99ab-d092-4834-9be6-77db4083935f" containerID="330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da" exitCode=0 Jan 27 12:59:20 crc kubenswrapper[4900]: I0127 12:59:20.014891 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fjzb" event={"ID":"2e4a99ab-d092-4834-9be6-77db4083935f","Type":"ContainerDied","Data":"330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da"} Jan 27 12:59:20 crc kubenswrapper[4900]: I0127 12:59:20.015134 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fjzb" event={"ID":"2e4a99ab-d092-4834-9be6-77db4083935f","Type":"ContainerStarted","Data":"a891f2b4021b40d2874bb8fa83c0c2546ccc942fcac12d655b384dc8ddb24b59"} Jan 27 12:59:20 crc kubenswrapper[4900]: I0127 12:59:20.017220 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 12:59:21 crc kubenswrapper[4900]: I0127 12:59:21.034331 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fjzb" event={"ID":"2e4a99ab-d092-4834-9be6-77db4083935f","Type":"ContainerStarted","Data":"54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c"} Jan 27 12:59:25 crc kubenswrapper[4900]: I0127 12:59:25.100558 4900 generic.go:334] "Generic (PLEG): container finished" podID="d90fa5d6-40e8-4d00-a517-259b0b16f186" containerID="22c808cc03728319b8c119b465f2decb11c3510ad52db0ea64497969c495ebd1" exitCode=0 Jan 27 12:59:25 crc kubenswrapper[4900]: I0127 12:59:25.101140 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"d90fa5d6-40e8-4d00-a517-259b0b16f186","Type":"ContainerDied","Data":"22c808cc03728319b8c119b465f2decb11c3510ad52db0ea64497969c495ebd1"} Jan 27 12:59:26 crc kubenswrapper[4900]: I0127 12:59:26.115588 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d90fa5d6-40e8-4d00-a517-259b0b16f186","Type":"ContainerStarted","Data":"038a35eea225b4c5af836a7353d50cb5f105eace82cd1081ab30205c9de795ef"} Jan 27 12:59:26 crc kubenswrapper[4900]: I0127 12:59:26.116633 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 27 12:59:26 crc kubenswrapper[4900]: I0127 12:59:26.147389 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.1473515 podStartE2EDuration="38.1473515s" podCreationTimestamp="2026-01-27 12:58:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 12:59:26.139380899 +0000 UTC m=+1993.376409129" watchObservedRunningTime="2026-01-27 12:59:26.1473515 +0000 UTC m=+1993.384379710" Jan 27 12:59:28 crc kubenswrapper[4900]: I0127 12:59:28.142680 4900 generic.go:334] "Generic (PLEG): container finished" podID="2e4a99ab-d092-4834-9be6-77db4083935f" containerID="54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c" exitCode=0 Jan 27 12:59:28 crc kubenswrapper[4900]: I0127 12:59:28.144191 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fjzb" event={"ID":"2e4a99ab-d092-4834-9be6-77db4083935f","Type":"ContainerDied","Data":"54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c"} Jan 27 12:59:29 crc kubenswrapper[4900]: I0127 12:59:29.157749 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fjzb" event={"ID":"2e4a99ab-d092-4834-9be6-77db4083935f","Type":"ContainerStarted","Data":"becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da"} Jan 27 12:59:29 crc kubenswrapper[4900]: I0127 12:59:29.182568 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4fjzb" podStartSLOduration=2.662705854 podStartE2EDuration="11.182541597s" podCreationTimestamp="2026-01-27 12:59:18 +0000 UTC" firstStartedPulling="2026-01-27 12:59:20.016848499 +0000 UTC m=+1987.253876709" lastFinishedPulling="2026-01-27 12:59:28.536684232 +0000 UTC m=+1995.773712452" observedRunningTime="2026-01-27 12:59:29.179836348 +0000 UTC m=+1996.416864558" watchObservedRunningTime="2026-01-27 12:59:29.182541597 +0000 UTC m=+1996.419569837" Jan 27 12:59:38 crc kubenswrapper[4900]: I0127 12:59:38.753440 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:38 crc kubenswrapper[4900]: I0127 12:59:38.754020 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:38 crc kubenswrapper[4900]: I0127 12:59:38.941322 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 27 12:59:39 crc kubenswrapper[4900]: I0127 12:59:39.805015 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4fjzb" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="registry-server" probeResult="failure" output=< Jan 27 12:59:39 crc 
kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:59:39 crc kubenswrapper[4900]: > Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.058556 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-22af-account-create-update-5nw2h"] Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.084521 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-hsfkh"] Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.096523 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-2tkfw"] Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.129813 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-22af-account-create-update-5nw2h"] Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.140750 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-hsfkh"] Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.155191 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-2tkfw"] Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.497070 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa278afe-1535-4c32-af1f-840d8df9dbe5" path="/var/lib/kubelet/pods/aa278afe-1535-4c32-af1f-840d8df9dbe5/volumes" Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.498327 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d14e50e4-6c8c-4eac-af5d-3726019acdc9" path="/var/lib/kubelet/pods/d14e50e4-6c8c-4eac-af5d-3726019acdc9/volumes" Jan 27 12:59:46 crc kubenswrapper[4900]: I0127 12:59:46.499112 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2794095-3d03-4cf0-8e7b-ecc39fb3db7a" path="/var/lib/kubelet/pods/f2794095-3d03-4cf0-8e7b-ecc39fb3db7a/volumes" Jan 27 12:59:49 crc kubenswrapper[4900]: I0127 12:59:49.832666 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4fjzb" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="registry-server" probeResult="failure" output=< Jan 27 12:59:49 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 12:59:49 crc kubenswrapper[4900]: > Jan 27 12:59:51 crc kubenswrapper[4900]: I0127 12:59:51.035760 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-7e73-account-create-update-6fnzb"] Jan 27 12:59:51 crc kubenswrapper[4900]: I0127 12:59:51.050767 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-7e73-account-create-update-6fnzb"] Jan 27 12:59:52 crc kubenswrapper[4900]: I0127 12:59:52.499530 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02763fa3-4db9-4750-ba18-7a434e9cd831" path="/var/lib/kubelet/pods/02763fa3-4db9-4750-ba18-7a434e9cd831/volumes" Jan 27 12:59:55 crc kubenswrapper[4900]: I0127 12:59:55.063692 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5ade-account-create-update-drsd8"] Jan 27 12:59:55 crc kubenswrapper[4900]: I0127 12:59:55.081841 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-29z9h"] Jan 27 12:59:55 crc kubenswrapper[4900]: I0127 12:59:55.093407 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5ade-account-create-update-drsd8"] Jan 27 12:59:55 crc kubenswrapper[4900]: I0127 12:59:55.106508 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/placement-db-create-29z9h"] Jan 27 12:59:55 crc kubenswrapper[4900]: I0127 12:59:55.117449 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-w6s6w"] Jan 27 12:59:55 crc kubenswrapper[4900]: I0127 12:59:55.128795 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-1e0a-account-create-update-jft2t"] Jan 27 12:59:55 crc kubenswrapper[4900]: I0127 12:59:55.139526 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-w6s6w"] Jan 27 12:59:55 crc kubenswrapper[4900]: I0127 12:59:55.149766 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-1e0a-account-create-update-jft2t"] Jan 27 12:59:56 crc kubenswrapper[4900]: I0127 12:59:56.497553 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="261d14b0-20d4-45ec-ab1d-fdd704a6630b" path="/var/lib/kubelet/pods/261d14b0-20d4-45ec-ab1d-fdd704a6630b/volumes" Jan 27 12:59:56 crc kubenswrapper[4900]: I0127 12:59:56.498967 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31ce1b9b-164b-45ad-b989-27e535bbdb8b" path="/var/lib/kubelet/pods/31ce1b9b-164b-45ad-b989-27e535bbdb8b/volumes" Jan 27 12:59:56 crc kubenswrapper[4900]: I0127 12:59:56.500583 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c79b5e05-afb6-4a73-90d2-32beab5ba2d3" path="/var/lib/kubelet/pods/c79b5e05-afb6-4a73-90d2-32beab5ba2d3/volumes" Jan 27 12:59:56 crc kubenswrapper[4900]: I0127 12:59:56.501973 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee07a3a8-51d8-498e-ace9-c4ea774065fd" path="/var/lib/kubelet/pods/ee07a3a8-51d8-498e-ace9-c4ea774065fd/volumes" Jan 27 12:59:58 crc kubenswrapper[4900]: I0127 12:59:58.823656 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:58 crc kubenswrapper[4900]: I0127 12:59:58.886216 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 12:59:59 crc kubenswrapper[4900]: I0127 12:59:59.073757 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4fjzb"] Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.167909 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688"] Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.170947 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.174233 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.181307 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.186770 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688"] Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.204699 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48f3ebad-795a-44d9-bcfb-26272131254e-config-volume\") pod \"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.204882 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzmvx\" (UniqueName: \"kubernetes.io/projected/48f3ebad-795a-44d9-bcfb-26272131254e-kube-api-access-rzmvx\") pod \"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.204974 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48f3ebad-795a-44d9-bcfb-26272131254e-secret-volume\") pod \"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.309428 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48f3ebad-795a-44d9-bcfb-26272131254e-config-volume\") pod \"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.309663 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzmvx\" (UniqueName: \"kubernetes.io/projected/48f3ebad-795a-44d9-bcfb-26272131254e-kube-api-access-rzmvx\") pod \"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.309781 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48f3ebad-795a-44d9-bcfb-26272131254e-secret-volume\") pod \"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.310877 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48f3ebad-795a-44d9-bcfb-26272131254e-config-volume\") pod 
\"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.338904 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48f3ebad-795a-44d9-bcfb-26272131254e-secret-volume\") pod \"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.345915 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzmvx\" (UniqueName: \"kubernetes.io/projected/48f3ebad-795a-44d9-bcfb-26272131254e-kube-api-access-rzmvx\") pod \"collect-profiles-29491980-7r688\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.505665 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:00 crc kubenswrapper[4900]: I0127 13:00:00.831389 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4fjzb" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="registry-server" containerID="cri-o://becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da" gracePeriod=2 Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.124724 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688"] Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.260383 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.366681 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-utilities\") pod \"2e4a99ab-d092-4834-9be6-77db4083935f\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.366795 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxxb7\" (UniqueName: \"kubernetes.io/projected/2e4a99ab-d092-4834-9be6-77db4083935f-kube-api-access-rxxb7\") pod \"2e4a99ab-d092-4834-9be6-77db4083935f\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.366926 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-catalog-content\") pod \"2e4a99ab-d092-4834-9be6-77db4083935f\" (UID: \"2e4a99ab-d092-4834-9be6-77db4083935f\") " Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.369994 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-utilities" (OuterVolumeSpecName: "utilities") pod "2e4a99ab-d092-4834-9be6-77db4083935f" (UID: "2e4a99ab-d092-4834-9be6-77db4083935f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.379680 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e4a99ab-d092-4834-9be6-77db4083935f-kube-api-access-rxxb7" (OuterVolumeSpecName: "kube-api-access-rxxb7") pod "2e4a99ab-d092-4834-9be6-77db4083935f" (UID: "2e4a99ab-d092-4834-9be6-77db4083935f"). InnerVolumeSpecName "kube-api-access-rxxb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.471400 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.471453 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxxb7\" (UniqueName: \"kubernetes.io/projected/2e4a99ab-d092-4834-9be6-77db4083935f-kube-api-access-rxxb7\") on node \"crc\" DevicePath \"\"" Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.515578 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e4a99ab-d092-4834-9be6-77db4083935f" (UID: "2e4a99ab-d092-4834-9be6-77db4083935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:00:01 crc kubenswrapper[4900]: I0127 13:00:01.575705 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e4a99ab-d092-4834-9be6-77db4083935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.183499 4900 generic.go:334] "Generic (PLEG): container finished" podID="2e4a99ab-d092-4834-9be6-77db4083935f" containerID="becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da" exitCode=0 Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.183663 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4fjzb" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.183672 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fjzb" event={"ID":"2e4a99ab-d092-4834-9be6-77db4083935f","Type":"ContainerDied","Data":"becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da"} Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.183867 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fjzb" event={"ID":"2e4a99ab-d092-4834-9be6-77db4083935f","Type":"ContainerDied","Data":"a891f2b4021b40d2874bb8fa83c0c2546ccc942fcac12d655b384dc8ddb24b59"} Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.183903 4900 scope.go:117] "RemoveContainer" containerID="becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.191523 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" event={"ID":"48f3ebad-795a-44d9-bcfb-26272131254e","Type":"ContainerStarted","Data":"29e877a26a5da382039a962f9cb8ffc972fed723f8bb5c4582ad700665e53dae"} Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.191896 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" event={"ID":"48f3ebad-795a-44d9-bcfb-26272131254e","Type":"ContainerStarted","Data":"9f44a24dcad2285c8ac0b856f501a95f2b1db19502c6d95c3b4280ff11f306c7"} Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.235165 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" podStartSLOduration=2.235119231 podStartE2EDuration="2.235119231s" podCreationTimestamp="2026-01-27 13:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 13:00:02.219943229 +0000 UTC m=+2029.456971449" watchObservedRunningTime="2026-01-27 13:00:02.235119231 +0000 UTC m=+2029.472147451" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.239455 4900 scope.go:117] "RemoveContainer" containerID="54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.258021 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4fjzb"] Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.271752 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4fjzb"] Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.288350 4900 scope.go:117] "RemoveContainer" containerID="330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.369576 4900 scope.go:117] "RemoveContainer" containerID="becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da" Jan 27 13:00:02 crc kubenswrapper[4900]: E0127 13:00:02.370590 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da\": container with ID starting with becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da not found: ID does not exist" containerID="becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da" Jan 27 13:00:02 crc 
kubenswrapper[4900]: I0127 13:00:02.370680 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da"} err="failed to get container status \"becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da\": rpc error: code = NotFound desc = could not find container \"becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da\": container with ID starting with becf3818aa1c02470819fdd9e46d1e25f5c0f13f9c4fba054ad8d11bc4e650da not found: ID does not exist" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.370743 4900 scope.go:117] "RemoveContainer" containerID="54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c" Jan 27 13:00:02 crc kubenswrapper[4900]: E0127 13:00:02.371463 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c\": container with ID starting with 54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c not found: ID does not exist" containerID="54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.371502 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c"} err="failed to get container status \"54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c\": rpc error: code = NotFound desc = could not find container \"54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c\": container with ID starting with 54305601e12b5a0c30aacdfd77dda80cc4c6118eb542bda0414b8e143411330c not found: ID does not exist" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.371531 4900 scope.go:117] "RemoveContainer" containerID="330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da" Jan 27 13:00:02 crc kubenswrapper[4900]: E0127 13:00:02.372621 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da\": container with ID starting with 330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da not found: ID does not exist" containerID="330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.372680 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da"} err="failed to get container status \"330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da\": rpc error: code = NotFound desc = could not find container \"330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da\": container with ID starting with 330770b7826ca287cc71fbb2f9c4d1eee11bf8fa643421f2adf722a53a7410da not found: ID does not exist" Jan 27 13:00:02 crc kubenswrapper[4900]: I0127 13:00:02.500477 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" path="/var/lib/kubelet/pods/2e4a99ab-d092-4834-9be6-77db4083935f/volumes" Jan 27 13:00:03 crc kubenswrapper[4900]: I0127 13:00:03.044271 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg"] Jan 27 13:00:03 crc kubenswrapper[4900]: I0127 13:00:03.056953 4900 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-accf-account-create-update-s6gmw"] Jan 27 13:00:03 crc kubenswrapper[4900]: I0127 13:00:03.071149 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-wcmjg"] Jan 27 13:00:03 crc kubenswrapper[4900]: I0127 13:00:03.085115 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-accf-account-create-update-s6gmw"] Jan 27 13:00:03 crc kubenswrapper[4900]: I0127 13:00:03.210163 4900 generic.go:334] "Generic (PLEG): container finished" podID="48f3ebad-795a-44d9-bcfb-26272131254e" containerID="29e877a26a5da382039a962f9cb8ffc972fed723f8bb5c4582ad700665e53dae" exitCode=0 Jan 27 13:00:03 crc kubenswrapper[4900]: I0127 13:00:03.210280 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" event={"ID":"48f3ebad-795a-44d9-bcfb-26272131254e","Type":"ContainerDied","Data":"29e877a26a5da382039a962f9cb8ffc972fed723f8bb5c4582ad700665e53dae"} Jan 27 13:00:04 crc kubenswrapper[4900]: I0127 13:00:04.594380 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad92fe8d-3cd2-4bac-8d4e-8e0590824e17" path="/var/lib/kubelet/pods/ad92fe8d-3cd2-4bac-8d4e-8e0590824e17/volumes" Jan 27 13:00:04 crc kubenswrapper[4900]: I0127 13:00:04.614482 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd31604a-235d-497a-a5fb-00c2928a0954" path="/var/lib/kubelet/pods/fd31604a-235d-497a-a5fb-00c2928a0954/volumes" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.397131 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.478857 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48f3ebad-795a-44d9-bcfb-26272131254e-config-volume\") pod \"48f3ebad-795a-44d9-bcfb-26272131254e\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.479029 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48f3ebad-795a-44d9-bcfb-26272131254e-secret-volume\") pod \"48f3ebad-795a-44d9-bcfb-26272131254e\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.479279 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzmvx\" (UniqueName: \"kubernetes.io/projected/48f3ebad-795a-44d9-bcfb-26272131254e-kube-api-access-rzmvx\") pod \"48f3ebad-795a-44d9-bcfb-26272131254e\" (UID: \"48f3ebad-795a-44d9-bcfb-26272131254e\") " Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.479724 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48f3ebad-795a-44d9-bcfb-26272131254e-config-volume" (OuterVolumeSpecName: "config-volume") pod "48f3ebad-795a-44d9-bcfb-26272131254e" (UID: "48f3ebad-795a-44d9-bcfb-26272131254e"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.480445 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48f3ebad-795a-44d9-bcfb-26272131254e-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.496778 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48f3ebad-795a-44d9-bcfb-26272131254e-kube-api-access-rzmvx" (OuterVolumeSpecName: "kube-api-access-rzmvx") pod "48f3ebad-795a-44d9-bcfb-26272131254e" (UID: "48f3ebad-795a-44d9-bcfb-26272131254e"). InnerVolumeSpecName "kube-api-access-rzmvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.505434 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48f3ebad-795a-44d9-bcfb-26272131254e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "48f3ebad-795a-44d9-bcfb-26272131254e" (UID: "48f3ebad-795a-44d9-bcfb-26272131254e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.540801 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9"] Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.555523 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491935-l6zm9"] Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.583504 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48f3ebad-795a-44d9-bcfb-26272131254e-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.583534 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzmvx\" (UniqueName: \"kubernetes.io/projected/48f3ebad-795a-44d9-bcfb-26272131254e-kube-api-access-rzmvx\") on node \"crc\" DevicePath \"\"" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.585204 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" event={"ID":"48f3ebad-795a-44d9-bcfb-26272131254e","Type":"ContainerDied","Data":"9f44a24dcad2285c8ac0b856f501a95f2b1db19502c6d95c3b4280ff11f306c7"} Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.585255 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f44a24dcad2285c8ac0b856f501a95f2b1db19502c6d95c3b4280ff11f306c7" Jan 27 13:00:05 crc kubenswrapper[4900]: I0127 13:00:05.585333 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688" Jan 27 13:00:06 crc kubenswrapper[4900]: I0127 13:00:06.502093 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea673ed9-5530-4b5d-8997-403f903d27a6" path="/var/lib/kubelet/pods/ea673ed9-5530-4b5d-8997-403f903d27a6/volumes" Jan 27 13:00:07 crc kubenswrapper[4900]: I0127 13:00:07.034525 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-m57ds"] Jan 27 13:00:07 crc kubenswrapper[4900]: I0127 13:00:07.047484 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-m57ds"] Jan 27 13:00:08 crc kubenswrapper[4900]: I0127 13:00:08.510667 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65635e6c-be43-42a5-a370-884329911a60" path="/var/lib/kubelet/pods/65635e6c-be43-42a5-a370-884329911a60/volumes" Jan 27 13:00:22 crc kubenswrapper[4900]: I0127 13:00:22.462605 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:00:22 crc kubenswrapper[4900]: I0127 13:00:22.463248 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:00:29 crc kubenswrapper[4900]: I0127 13:00:29.066482 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-5eac-account-create-update-ggscp"] Jan 27 13:00:29 crc kubenswrapper[4900]: I0127 13:00:29.084016 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-sfjk4"] Jan 27 13:00:29 crc kubenswrapper[4900]: I0127 13:00:29.098246 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-5eac-account-create-update-ggscp"] Jan 27 13:00:29 crc kubenswrapper[4900]: I0127 13:00:29.109738 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-8xhbm"] Jan 27 13:00:29 crc kubenswrapper[4900]: I0127 13:00:29.121713 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c7be-account-create-update-wtq6h"] Jan 27 13:00:29 crc kubenswrapper[4900]: I0127 13:00:29.135110 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-8xhbm"] Jan 27 13:00:29 crc kubenswrapper[4900]: I0127 13:00:29.147120 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-sfjk4"] Jan 27 13:00:29 crc kubenswrapper[4900]: I0127 13:00:29.157829 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-c7be-account-create-update-wtq6h"] Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.043985 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-qkl46"] Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.064014 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-3073-account-create-update-wvxth"] Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.081641 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-6c48-account-create-update-nw88h"] Jan 27 13:00:30 crc 
kubenswrapper[4900]: I0127 13:00:30.095554 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-qkl46"] Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.112147 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-529rf"] Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.134643 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-3073-account-create-update-wvxth"] Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.160484 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-529rf"] Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.174774 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-6c48-account-create-update-nw88h"] Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.496725 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10a04b5b-42d9-488f-b5b3-21e86476918a" path="/var/lib/kubelet/pods/10a04b5b-42d9-488f-b5b3-21e86476918a/volumes" Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.498729 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e77e1e2-114e-44d3-8fc0-5c3d09e9179e" path="/var/lib/kubelet/pods/2e77e1e2-114e-44d3-8fc0-5c3d09e9179e/volumes" Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.500650 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51b69b53-dd5e-43e9-99c0-83c33e8e2168" path="/var/lib/kubelet/pods/51b69b53-dd5e-43e9-99c0-83c33e8e2168/volumes" Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.502026 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a470e2e-c53a-4c18-ae8f-236d3d410455" path="/var/lib/kubelet/pods/6a470e2e-c53a-4c18-ae8f-236d3d410455/volumes" Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.504334 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cd54e89-bf17-4216-955a-08f26a918d67" path="/var/lib/kubelet/pods/6cd54e89-bf17-4216-955a-08f26a918d67/volumes" Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.505742 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83e3ec3d-0683-4f57-b926-fbbeb94957a8" path="/var/lib/kubelet/pods/83e3ec3d-0683-4f57-b926-fbbeb94957a8/volumes" Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.507049 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc96b906-bcb7-451e-bab7-8b4c00058ed9" path="/var/lib/kubelet/pods/dc96b906-bcb7-451e-bab7-8b4c00058ed9/volumes" Jan 27 13:00:30 crc kubenswrapper[4900]: I0127 13:00:30.508794 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1" path="/var/lib/kubelet/pods/fd1bdfdc-d4ee-4914-8cb4-0dab96b4a8a1/volumes" Jan 27 13:00:33 crc kubenswrapper[4900]: I0127 13:00:33.040205 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-5rc2l"] Jan 27 13:00:33 crc kubenswrapper[4900]: I0127 13:00:33.053971 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-5rc2l"] Jan 27 13:00:34 crc kubenswrapper[4900]: I0127 13:00:34.495829 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4" path="/var/lib/kubelet/pods/1fd5d703-8c5b-42f2-ab79-7aeb1d9dc1a4/volumes" Jan 27 13:00:36 crc kubenswrapper[4900]: I0127 13:00:36.695344 4900 scope.go:117] "RemoveContainer" containerID="c052ead14ba9fc79c26767e2aba1acaf5a6d0e3e406e31242145e429fe21e451" Jan 27 
13:00:36 crc kubenswrapper[4900]: I0127 13:00:36.726437 4900 scope.go:117] "RemoveContainer" containerID="6f6f62fac538794159a5813ad7b076ad5cb7c443c9827eea47cb4e68b7f77b92" Jan 27 13:00:36 crc kubenswrapper[4900]: I0127 13:00:36.757759 4900 scope.go:117] "RemoveContainer" containerID="0056d78e57274c7e0851e0b759847b1981314cfd2e17ab468966641560115762" Jan 27 13:00:36 crc kubenswrapper[4900]: I0127 13:00:36.832886 4900 scope.go:117] "RemoveContainer" containerID="e6619e8476cd8fbe3647dd264d54c98dda8cdc5dc296d0fc5bfb11a469a47c86" Jan 27 13:00:36 crc kubenswrapper[4900]: I0127 13:00:36.891660 4900 scope.go:117] "RemoveContainer" containerID="dcc580548641865be2c637dfeec0215aea33ac37c34b70abfac8e1a5efd980d2" Jan 27 13:00:36 crc kubenswrapper[4900]: I0127 13:00:36.945928 4900 scope.go:117] "RemoveContainer" containerID="35ee5d2241fe7a1fe71203b8b74c11d5b957bc15fb75a7bfada360530e79b364" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.032301 4900 scope.go:117] "RemoveContainer" containerID="fd673af18ccdf9f88a43ee5a7d8f4906e58279b428d93efb0404f39e168bcfcd" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.143271 4900 scope.go:117] "RemoveContainer" containerID="f28eb2dc00c93b8d7274fe9fd73951a031d994477730a4a40fb722418b6342e5" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.239780 4900 scope.go:117] "RemoveContainer" containerID="49f14d4f9917671596f82ca771908d68402b2ffa5a4cf4fe043f31c03b8b8191" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.266988 4900 scope.go:117] "RemoveContainer" containerID="a74c693832fccbd0c134ca600dcc21213cf57e2ea4e26eea0ea9072e40e0874a" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.289830 4900 scope.go:117] "RemoveContainer" containerID="13889eab61a640be1b81555b976028b81125604a3d51dfc72ef84d8214c3d80e" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.313719 4900 scope.go:117] "RemoveContainer" containerID="d88d6a7adfbc0aeb34e6d996817676771fe1bb48c853ece486365197f4bab755" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.337463 4900 scope.go:117] "RemoveContainer" containerID="750239e6f863bd37bf3a65af23ef7bbad093e351d2e4d571498c10d1dd39cf26" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.365807 4900 scope.go:117] "RemoveContainer" containerID="194d7724541532f8815232c97f649e2362bb78b3d29d8f1d12b016971da45504" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.390949 4900 scope.go:117] "RemoveContainer" containerID="e90e71088358793e98d3671c3bacb75ced3fd1e80b860cf1140887d9c6e44be8" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.424168 4900 scope.go:117] "RemoveContainer" containerID="66fddc6f9867ae744051c06d1f664d9a3c959402e0b84c0676616f0b61c4b56d" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.453240 4900 scope.go:117] "RemoveContainer" containerID="7b373ed0c02d36b7ba775a9be8852008006162a5ea5e017f61db65917394590e" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.484979 4900 scope.go:117] "RemoveContainer" containerID="bc62fef879308289b145fb901cc7256ca0e2f2847a617b712c5e76f1ce39d8e4" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.529515 4900 scope.go:117] "RemoveContainer" containerID="0e841b955ac81d5af63736094f712f5c64b0834928d576c93f3b7daf3f5a6c6f" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.562269 4900 scope.go:117] "RemoveContainer" containerID="66cd4b87dbcfbd189d10c0564ac6e1988a9f728e9b390784896b00721150a212" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.599592 4900 scope.go:117] "RemoveContainer" 
containerID="4b20bd641d9bcd1c9494f4e55c3e92935ab4cfb59d02fc7b56f79dfbc53f1a4b" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.636451 4900 scope.go:117] "RemoveContainer" containerID="64398728ab6d462ef88ef1539188265db676a604efdf20888df073cc53a5ad60" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.667739 4900 scope.go:117] "RemoveContainer" containerID="640d5ad067ae99ae44c0d01c68c5fe1ce407c89a12675f519764cbcb91097de9" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.702181 4900 scope.go:117] "RemoveContainer" containerID="0b2d2d88e6d93572de7f994bb6020de9c67be42180ca0c184d0bda46a2044b58" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.728937 4900 scope.go:117] "RemoveContainer" containerID="d8a2e50f8fd6d787bc58416c0f2aa1d34e25aaa6898c22ac1e748213e7ff8d6f" Jan 27 13:00:37 crc kubenswrapper[4900]: I0127 13:00:37.841000 4900 scope.go:117] "RemoveContainer" containerID="336b4567761cf6502313a127525bcce7e632a8eb755abd50086fc60bf5e4d619" Jan 27 13:00:42 crc kubenswrapper[4900]: I0127 13:00:42.041888 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-cp5z4"] Jan 27 13:00:42 crc kubenswrapper[4900]: I0127 13:00:42.055447 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-cp5z4"] Jan 27 13:00:42 crc kubenswrapper[4900]: I0127 13:00:42.501574 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf9e2ef8-0e37-438f-b2bd-c1c050c4064b" path="/var/lib/kubelet/pods/cf9e2ef8-0e37-438f-b2bd-c1c050c4064b/volumes" Jan 27 13:00:52 crc kubenswrapper[4900]: I0127 13:00:52.373050 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:00:52 crc kubenswrapper[4900]: I0127 13:00:52.373816 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.165214 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29491981-mdm4z"] Jan 27 13:01:00 crc kubenswrapper[4900]: E0127 13:01:00.166414 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="registry-server" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.166431 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="registry-server" Jan 27 13:01:00 crc kubenswrapper[4900]: E0127 13:01:00.166447 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="extract-utilities" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.166454 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="extract-utilities" Jan 27 13:01:00 crc kubenswrapper[4900]: E0127 13:01:00.166466 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="extract-content" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.166475 4900 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="extract-content" Jan 27 13:01:00 crc kubenswrapper[4900]: E0127 13:01:00.166483 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48f3ebad-795a-44d9-bcfb-26272131254e" containerName="collect-profiles" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.166490 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="48f3ebad-795a-44d9-bcfb-26272131254e" containerName="collect-profiles" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.166767 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="48f3ebad-795a-44d9-bcfb-26272131254e" containerName="collect-profiles" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.166780 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e4a99ab-d092-4834-9be6-77db4083935f" containerName="registry-server" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.167824 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.180468 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29491981-mdm4z"] Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.255583 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-combined-ca-bundle\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.255910 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljhcz\" (UniqueName: \"kubernetes.io/projected/844ec44e-58d8-4d70-8756-ebb19272cf74-kube-api-access-ljhcz\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.256021 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-fernet-keys\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.256217 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-config-data\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.359421 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-fernet-keys\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.359799 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-config-data\") pod \"keystone-cron-29491981-mdm4z\" (UID: 
\"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.360020 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-combined-ca-bundle\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.360278 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljhcz\" (UniqueName: \"kubernetes.io/projected/844ec44e-58d8-4d70-8756-ebb19272cf74-kube-api-access-ljhcz\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.372067 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-config-data\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.373157 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-combined-ca-bundle\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.377420 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-fernet-keys\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.393909 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljhcz\" (UniqueName: \"kubernetes.io/projected/844ec44e-58d8-4d70-8756-ebb19272cf74-kube-api-access-ljhcz\") pod \"keystone-cron-29491981-mdm4z\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.489874 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.584232 4900 generic.go:334] "Generic (PLEG): container finished" podID="53908aae-9e96-453f-91e6-f17eeb2ce37a" containerID="da77867d6f93fdc8ee8ab73693838c98c0ff5f9274c979dac3724997daf40f95" exitCode=0 Jan 27 13:01:00 crc kubenswrapper[4900]: I0127 13:01:00.584288 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" event={"ID":"53908aae-9e96-453f-91e6-f17eeb2ce37a","Type":"ContainerDied","Data":"da77867d6f93fdc8ee8ab73693838c98c0ff5f9274c979dac3724997daf40f95"} Jan 27 13:01:01 crc kubenswrapper[4900]: I0127 13:01:01.024786 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29491981-mdm4z"] Jan 27 13:01:01 crc kubenswrapper[4900]: I0127 13:01:01.601625 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29491981-mdm4z" event={"ID":"844ec44e-58d8-4d70-8756-ebb19272cf74","Type":"ContainerStarted","Data":"f3be7f8959006d26701aeb4d7e90e791b4f38a453135e83469d6a8d0d2cf7432"} Jan 27 13:01:01 crc kubenswrapper[4900]: I0127 13:01:01.606402 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29491981-mdm4z" event={"ID":"844ec44e-58d8-4d70-8756-ebb19272cf74","Type":"ContainerStarted","Data":"2f2b3a7d3a9a9959ff523b6f24ef9f848640e77278005d59ff9f99abf593b29a"} Jan 27 13:01:01 crc kubenswrapper[4900]: I0127 13:01:01.633713 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29491981-mdm4z" podStartSLOduration=1.6336871 podStartE2EDuration="1.6336871s" podCreationTimestamp="2026-01-27 13:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 13:01:01.618310763 +0000 UTC m=+2088.855338973" watchObservedRunningTime="2026-01-27 13:01:01.6336871 +0000 UTC m=+2088.870715310" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.269468 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.314628 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-bootstrap-combined-ca-bundle\") pod \"53908aae-9e96-453f-91e6-f17eeb2ce37a\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.314893 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rnbb\" (UniqueName: \"kubernetes.io/projected/53908aae-9e96-453f-91e6-f17eeb2ce37a-kube-api-access-2rnbb\") pod \"53908aae-9e96-453f-91e6-f17eeb2ce37a\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.315090 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-inventory\") pod \"53908aae-9e96-453f-91e6-f17eeb2ce37a\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.315217 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-ssh-key-openstack-edpm-ipam\") pod \"53908aae-9e96-453f-91e6-f17eeb2ce37a\" (UID: \"53908aae-9e96-453f-91e6-f17eeb2ce37a\") " Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.324742 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "53908aae-9e96-453f-91e6-f17eeb2ce37a" (UID: "53908aae-9e96-453f-91e6-f17eeb2ce37a"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.324828 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53908aae-9e96-453f-91e6-f17eeb2ce37a-kube-api-access-2rnbb" (OuterVolumeSpecName: "kube-api-access-2rnbb") pod "53908aae-9e96-453f-91e6-f17eeb2ce37a" (UID: "53908aae-9e96-453f-91e6-f17eeb2ce37a"). InnerVolumeSpecName "kube-api-access-2rnbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.361726 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "53908aae-9e96-453f-91e6-f17eeb2ce37a" (UID: "53908aae-9e96-453f-91e6-f17eeb2ce37a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.386375 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-inventory" (OuterVolumeSpecName: "inventory") pod "53908aae-9e96-453f-91e6-f17eeb2ce37a" (UID: "53908aae-9e96-453f-91e6-f17eeb2ce37a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.418689 4900 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.418951 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rnbb\" (UniqueName: \"kubernetes.io/projected/53908aae-9e96-453f-91e6-f17eeb2ce37a-kube-api-access-2rnbb\") on node \"crc\" DevicePath \"\"" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.419023 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.419180 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/53908aae-9e96-453f-91e6-f17eeb2ce37a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.622524 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.625167 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m" event={"ID":"53908aae-9e96-453f-91e6-f17eeb2ce37a","Type":"ContainerDied","Data":"5a864bfa023cb6636fe15bc661df150cdc8a77a9454a58ff8d00452e35fef37c"} Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.625291 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a864bfa023cb6636fe15bc661df150cdc8a77a9454a58ff8d00452e35fef37c" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.728324 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp"] Jan 27 13:01:02 crc kubenswrapper[4900]: E0127 13:01:02.729045 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53908aae-9e96-453f-91e6-f17eeb2ce37a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.729482 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="53908aae-9e96-453f-91e6-f17eeb2ce37a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.730172 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="53908aae-9e96-453f-91e6-f17eeb2ce37a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.731521 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.735710 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.740403 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.740659 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.740977 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.769678 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp"] Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.840707 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.841398 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.841650 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvs2h\" (UniqueName: \"kubernetes.io/projected/c74db364-6bed-43d8-988a-ed979e4827cf-kube-api-access-rvs2h\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.943230 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.943736 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvs2h\" (UniqueName: \"kubernetes.io/projected/c74db364-6bed-43d8-988a-ed979e4827cf-kube-api-access-rvs2h\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.943894 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.948276 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.948371 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:02 crc kubenswrapper[4900]: I0127 13:01:02.965924 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvs2h\" (UniqueName: \"kubernetes.io/projected/c74db364-6bed-43d8-988a-ed979e4827cf-kube-api-access-rvs2h\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xssdp\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:03 crc kubenswrapper[4900]: I0127 13:01:03.100439 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:01:03 crc kubenswrapper[4900]: I0127 13:01:03.957423 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp"] Jan 27 13:01:04 crc kubenswrapper[4900]: I0127 13:01:04.650512 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" event={"ID":"c74db364-6bed-43d8-988a-ed979e4827cf","Type":"ContainerStarted","Data":"9cef0d8aecc38e245c5c0ef1051bb84ca315a62ab1311ea6bb40d263b5070d9e"} Jan 27 13:01:05 crc kubenswrapper[4900]: I0127 13:01:05.665609 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" event={"ID":"c74db364-6bed-43d8-988a-ed979e4827cf","Type":"ContainerStarted","Data":"dd0ac3c28600323358b447a19cff0d405d6d1f64b22dab33c184538d7c2b5244"} Jan 27 13:01:05 crc kubenswrapper[4900]: I0127 13:01:05.667780 4900 generic.go:334] "Generic (PLEG): container finished" podID="844ec44e-58d8-4d70-8756-ebb19272cf74" containerID="f3be7f8959006d26701aeb4d7e90e791b4f38a453135e83469d6a8d0d2cf7432" exitCode=0 Jan 27 13:01:05 crc kubenswrapper[4900]: I0127 13:01:05.667865 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29491981-mdm4z" event={"ID":"844ec44e-58d8-4d70-8756-ebb19272cf74","Type":"ContainerDied","Data":"f3be7f8959006d26701aeb4d7e90e791b4f38a453135e83469d6a8d0d2cf7432"} Jan 27 13:01:05 crc kubenswrapper[4900]: I0127 13:01:05.685720 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" podStartSLOduration=3.186247124 podStartE2EDuration="3.685695381s" 
podCreationTimestamp="2026-01-27 13:01:02 +0000 UTC" firstStartedPulling="2026-01-27 13:01:03.972930976 +0000 UTC m=+2091.209959186" lastFinishedPulling="2026-01-27 13:01:04.472379233 +0000 UTC m=+2091.709407443" observedRunningTime="2026-01-27 13:01:05.683633541 +0000 UTC m=+2092.920661751" watchObservedRunningTime="2026-01-27 13:01:05.685695381 +0000 UTC m=+2092.922723591" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.255796 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.442453 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-combined-ca-bundle\") pod \"844ec44e-58d8-4d70-8756-ebb19272cf74\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.442593 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-config-data\") pod \"844ec44e-58d8-4d70-8756-ebb19272cf74\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.442641 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-fernet-keys\") pod \"844ec44e-58d8-4d70-8756-ebb19272cf74\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.442679 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljhcz\" (UniqueName: \"kubernetes.io/projected/844ec44e-58d8-4d70-8756-ebb19272cf74-kube-api-access-ljhcz\") pod \"844ec44e-58d8-4d70-8756-ebb19272cf74\" (UID: \"844ec44e-58d8-4d70-8756-ebb19272cf74\") " Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.466145 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "844ec44e-58d8-4d70-8756-ebb19272cf74" (UID: "844ec44e-58d8-4d70-8756-ebb19272cf74"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.466380 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/844ec44e-58d8-4d70-8756-ebb19272cf74-kube-api-access-ljhcz" (OuterVolumeSpecName: "kube-api-access-ljhcz") pod "844ec44e-58d8-4d70-8756-ebb19272cf74" (UID: "844ec44e-58d8-4d70-8756-ebb19272cf74"). InnerVolumeSpecName "kube-api-access-ljhcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.495180 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "844ec44e-58d8-4d70-8756-ebb19272cf74" (UID: "844ec44e-58d8-4d70-8756-ebb19272cf74"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.528871 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-config-data" (OuterVolumeSpecName: "config-data") pod "844ec44e-58d8-4d70-8756-ebb19272cf74" (UID: "844ec44e-58d8-4d70-8756-ebb19272cf74"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.545238 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.545279 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.545295 4900 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/844ec44e-58d8-4d70-8756-ebb19272cf74-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.545309 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljhcz\" (UniqueName: \"kubernetes.io/projected/844ec44e-58d8-4d70-8756-ebb19272cf74-kube-api-access-ljhcz\") on node \"crc\" DevicePath \"\"" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.691946 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29491981-mdm4z" event={"ID":"844ec44e-58d8-4d70-8756-ebb19272cf74","Type":"ContainerDied","Data":"2f2b3a7d3a9a9959ff523b6f24ef9f848640e77278005d59ff9f99abf593b29a"} Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.692004 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f2b3a7d3a9a9959ff523b6f24ef9f848640e77278005d59ff9f99abf593b29a" Jan 27 13:01:07 crc kubenswrapper[4900]: I0127 13:01:07.692102 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29491981-mdm4z" Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.372752 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.373361 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.373414 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.374515 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"14bb46af03aa708f5b56f9d73717659956f8ecfa5ffc85cf276e4cc701eaafcb"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.374590 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://14bb46af03aa708f5b56f9d73717659956f8ecfa5ffc85cf276e4cc701eaafcb" gracePeriod=600 Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.875393 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="14bb46af03aa708f5b56f9d73717659956f8ecfa5ffc85cf276e4cc701eaafcb" exitCode=0 Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.875488 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"14bb46af03aa708f5b56f9d73717659956f8ecfa5ffc85cf276e4cc701eaafcb"} Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.876170 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e"} Jan 27 13:01:22 crc kubenswrapper[4900]: I0127 13:01:22.876205 4900 scope.go:117] "RemoveContainer" containerID="4f82962a59e42e3a6dffd54599424f6bce2060cad22eea6c959dbb8f00cda876" Jan 27 13:01:23 crc kubenswrapper[4900]: I0127 13:01:23.059460 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-d4wfs"] Jan 27 13:01:23 crc kubenswrapper[4900]: I0127 13:01:23.072381 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-d4wfs"] Jan 27 13:01:24 crc kubenswrapper[4900]: I0127 13:01:24.498077 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3932d13-5d69-455f-88db-3978da7a8c00" path="/var/lib/kubelet/pods/f3932d13-5d69-455f-88db-3978da7a8c00/volumes" Jan 27 13:01:32 crc 
kubenswrapper[4900]: I0127 13:01:32.039751 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-4kmq8"] Jan 27 13:01:32 crc kubenswrapper[4900]: I0127 13:01:32.057181 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-4kmq8"] Jan 27 13:01:32 crc kubenswrapper[4900]: I0127 13:01:32.497491 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5c58344-109f-4fd1-948c-39ef56d4b0eb" path="/var/lib/kubelet/pods/a5c58344-109f-4fd1-948c-39ef56d4b0eb/volumes" Jan 27 13:01:37 crc kubenswrapper[4900]: I0127 13:01:37.032878 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-6hsbc"] Jan 27 13:01:37 crc kubenswrapper[4900]: I0127 13:01:37.048727 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-6hsbc"] Jan 27 13:01:37 crc kubenswrapper[4900]: I0127 13:01:37.062818 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-dr6wx"] Jan 27 13:01:37 crc kubenswrapper[4900]: I0127 13:01:37.075870 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-dr6wx"] Jan 27 13:01:38 crc kubenswrapper[4900]: I0127 13:01:38.407829 4900 scope.go:117] "RemoveContainer" containerID="575c3ce2fc52aa5ad7d9c254701fe8fd0ecb1b1cffc53e229d74a5d284fcfcec" Jan 27 13:01:38 crc kubenswrapper[4900]: I0127 13:01:38.456108 4900 scope.go:117] "RemoveContainer" containerID="a4174ce3a965df25a117d4db3cc9b3b623246141a958b9c57fb41c4814dfcffb" Jan 27 13:01:38 crc kubenswrapper[4900]: I0127 13:01:38.524540 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="551282ea-7408-404c-9128-cfa4f06089f3" path="/var/lib/kubelet/pods/551282ea-7408-404c-9128-cfa4f06089f3/volumes" Jan 27 13:01:38 crc kubenswrapper[4900]: I0127 13:01:38.526773 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed853af8-ff05-4908-a82f-deceefe54dad" path="/var/lib/kubelet/pods/ed853af8-ff05-4908-a82f-deceefe54dad/volumes" Jan 27 13:01:38 crc kubenswrapper[4900]: I0127 13:01:38.539470 4900 scope.go:117] "RemoveContainer" containerID="2cdcc5f6b97f6d2c57e295057c54e9dbf3f2c114cb50cf9a4ad28c5343df0fbd" Jan 27 13:01:38 crc kubenswrapper[4900]: I0127 13:01:38.598271 4900 scope.go:117] "RemoveContainer" containerID="8a64a45b84d96da73c53d0f60621fde345c4d6a741e8167c70565c4c3b743b9d" Jan 27 13:01:55 crc kubenswrapper[4900]: I0127 13:01:55.058416 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-2rf6d"] Jan 27 13:01:55 crc kubenswrapper[4900]: I0127 13:01:55.073416 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-2rf6d"] Jan 27 13:01:56 crc kubenswrapper[4900]: I0127 13:01:56.496766 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3331ea7-d796-459a-9e9d-6f744ba8822b" path="/var/lib/kubelet/pods/f3331ea7-d796-459a-9e9d-6f744ba8822b/volumes" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.420512 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kb2ml"] Jan 27 13:02:11 crc kubenswrapper[4900]: E0127 13:02:11.424298 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="844ec44e-58d8-4d70-8756-ebb19272cf74" containerName="keystone-cron" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.424404 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="844ec44e-58d8-4d70-8756-ebb19272cf74" containerName="keystone-cron" Jan 27 13:02:11 crc 
kubenswrapper[4900]: I0127 13:02:11.424758 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="844ec44e-58d8-4d70-8756-ebb19272cf74" containerName="keystone-cron" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.426969 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kb2ml"] Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.427314 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.484855 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4xpr\" (UniqueName: \"kubernetes.io/projected/932196cf-13a0-47eb-ac1e-84011a7a41cd-kube-api-access-l4xpr\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.485328 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-catalog-content\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.485437 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-utilities\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.587733 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4xpr\" (UniqueName: \"kubernetes.io/projected/932196cf-13a0-47eb-ac1e-84011a7a41cd-kube-api-access-l4xpr\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.588088 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-catalog-content\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.588164 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-utilities\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.588790 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-catalog-content\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.589179 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-utilities\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.613011 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4xpr\" (UniqueName: \"kubernetes.io/projected/932196cf-13a0-47eb-ac1e-84011a7a41cd-kube-api-access-l4xpr\") pod \"redhat-marketplace-kb2ml\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:11 crc kubenswrapper[4900]: I0127 13:02:11.769101 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:12 crc kubenswrapper[4900]: I0127 13:02:12.293780 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kb2ml"] Jan 27 13:02:12 crc kubenswrapper[4900]: W0127 13:02:12.303277 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod932196cf_13a0_47eb_ac1e_84011a7a41cd.slice/crio-629abafd499f14bd3680d6444aeabea725924fdc592d25e5a114496bb1433343 WatchSource:0}: Error finding container 629abafd499f14bd3680d6444aeabea725924fdc592d25e5a114496bb1433343: Status 404 returned error can't find the container with id 629abafd499f14bd3680d6444aeabea725924fdc592d25e5a114496bb1433343 Jan 27 13:02:12 crc kubenswrapper[4900]: I0127 13:02:12.709712 4900 generic.go:334] "Generic (PLEG): container finished" podID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerID="263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a" exitCode=0 Jan 27 13:02:12 crc kubenswrapper[4900]: I0127 13:02:12.709814 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kb2ml" event={"ID":"932196cf-13a0-47eb-ac1e-84011a7a41cd","Type":"ContainerDied","Data":"263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a"} Jan 27 13:02:12 crc kubenswrapper[4900]: I0127 13:02:12.710005 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kb2ml" event={"ID":"932196cf-13a0-47eb-ac1e-84011a7a41cd","Type":"ContainerStarted","Data":"629abafd499f14bd3680d6444aeabea725924fdc592d25e5a114496bb1433343"} Jan 27 13:02:14 crc kubenswrapper[4900]: I0127 13:02:14.747934 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kb2ml" event={"ID":"932196cf-13a0-47eb-ac1e-84011a7a41cd","Type":"ContainerStarted","Data":"2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6"} Jan 27 13:02:16 crc kubenswrapper[4900]: I0127 13:02:16.776688 4900 generic.go:334] "Generic (PLEG): container finished" podID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerID="2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6" exitCode=0 Jan 27 13:02:16 crc kubenswrapper[4900]: I0127 13:02:16.776785 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kb2ml" event={"ID":"932196cf-13a0-47eb-ac1e-84011a7a41cd","Type":"ContainerDied","Data":"2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6"} Jan 27 13:02:17 crc kubenswrapper[4900]: I0127 13:02:17.792495 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kb2ml" 
event={"ID":"932196cf-13a0-47eb-ac1e-84011a7a41cd","Type":"ContainerStarted","Data":"babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848"} Jan 27 13:02:17 crc kubenswrapper[4900]: I0127 13:02:17.839268 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kb2ml" podStartSLOduration=2.347395838 podStartE2EDuration="6.839230677s" podCreationTimestamp="2026-01-27 13:02:11 +0000 UTC" firstStartedPulling="2026-01-27 13:02:12.712211856 +0000 UTC m=+2159.949240066" lastFinishedPulling="2026-01-27 13:02:17.204046675 +0000 UTC m=+2164.441074905" observedRunningTime="2026-01-27 13:02:17.819268178 +0000 UTC m=+2165.056296388" watchObservedRunningTime="2026-01-27 13:02:17.839230677 +0000 UTC m=+2165.076258927" Jan 27 13:02:21 crc kubenswrapper[4900]: I0127 13:02:21.769331 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:21 crc kubenswrapper[4900]: I0127 13:02:21.770213 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:21 crc kubenswrapper[4900]: I0127 13:02:21.837052 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.187236 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6t9jw"] Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.191221 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.201029 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6t9jw"] Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.387557 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-catalog-content\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.387684 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ckp4\" (UniqueName: \"kubernetes.io/projected/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-kube-api-access-9ckp4\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.387856 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-utilities\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.492026 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-catalog-content\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc 
kubenswrapper[4900]: I0127 13:02:22.492249 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ckp4\" (UniqueName: \"kubernetes.io/projected/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-kube-api-access-9ckp4\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.492362 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-utilities\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.492550 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-catalog-content\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.493010 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-utilities\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.517558 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ckp4\" (UniqueName: \"kubernetes.io/projected/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-kube-api-access-9ckp4\") pod \"community-operators-6t9jw\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:22 crc kubenswrapper[4900]: I0127 13:02:22.524148 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:23 crc kubenswrapper[4900]: I0127 13:02:23.378984 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6t9jw"] Jan 27 13:02:23 crc kubenswrapper[4900]: W0127 13:02:23.460236 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd02c5394_d0c4_4bd3_9828_fbf8bbe83c7a.slice/crio-b539187b125982bd1122f661c8abac2401cec383ed571a38a6d4bb7b29d297fd WatchSource:0}: Error finding container b539187b125982bd1122f661c8abac2401cec383ed571a38a6d4bb7b29d297fd: Status 404 returned error can't find the container with id b539187b125982bd1122f661c8abac2401cec383ed571a38a6d4bb7b29d297fd Jan 27 13:02:24 crc kubenswrapper[4900]: I0127 13:02:24.005875 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6t9jw" event={"ID":"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a","Type":"ContainerStarted","Data":"17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3"} Jan 27 13:02:24 crc kubenswrapper[4900]: I0127 13:02:24.006884 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6t9jw" event={"ID":"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a","Type":"ContainerStarted","Data":"b539187b125982bd1122f661c8abac2401cec383ed571a38a6d4bb7b29d297fd"} Jan 27 13:02:25 crc kubenswrapper[4900]: I0127 13:02:25.023662 4900 generic.go:334] "Generic (PLEG): container finished" podID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerID="17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3" exitCode=0 Jan 27 13:02:25 crc kubenswrapper[4900]: I0127 13:02:25.023799 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6t9jw" event={"ID":"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a","Type":"ContainerDied","Data":"17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3"} Jan 27 13:02:26 crc kubenswrapper[4900]: I0127 13:02:26.043470 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6t9jw" event={"ID":"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a","Type":"ContainerStarted","Data":"79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1"} Jan 27 13:02:28 crc kubenswrapper[4900]: I0127 13:02:28.067042 4900 generic.go:334] "Generic (PLEG): container finished" podID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerID="79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1" exitCode=0 Jan 27 13:02:28 crc kubenswrapper[4900]: I0127 13:02:28.067109 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6t9jw" event={"ID":"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a","Type":"ContainerDied","Data":"79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1"} Jan 27 13:02:29 crc kubenswrapper[4900]: I0127 13:02:29.087010 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6t9jw" event={"ID":"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a","Type":"ContainerStarted","Data":"b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b"} Jan 27 13:02:29 crc kubenswrapper[4900]: I0127 13:02:29.129759 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6t9jw" podStartSLOduration=3.464524312 podStartE2EDuration="7.129733099s" podCreationTimestamp="2026-01-27 13:02:22 +0000 UTC" 
firstStartedPulling="2026-01-27 13:02:25.029456424 +0000 UTC m=+2172.266484644" lastFinishedPulling="2026-01-27 13:02:28.694665211 +0000 UTC m=+2175.931693431" observedRunningTime="2026-01-27 13:02:29.121574542 +0000 UTC m=+2176.358602772" watchObservedRunningTime="2026-01-27 13:02:29.129733099 +0000 UTC m=+2176.366761319" Jan 27 13:02:31 crc kubenswrapper[4900]: I0127 13:02:31.956269 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.015870 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kb2ml"] Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.121272 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kb2ml" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerName="registry-server" containerID="cri-o://babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848" gracePeriod=2 Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.524370 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.524514 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.585326 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.680158 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.857751 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-catalog-content\") pod \"932196cf-13a0-47eb-ac1e-84011a7a41cd\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.858092 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4xpr\" (UniqueName: \"kubernetes.io/projected/932196cf-13a0-47eb-ac1e-84011a7a41cd-kube-api-access-l4xpr\") pod \"932196cf-13a0-47eb-ac1e-84011a7a41cd\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.858264 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-utilities\") pod \"932196cf-13a0-47eb-ac1e-84011a7a41cd\" (UID: \"932196cf-13a0-47eb-ac1e-84011a7a41cd\") " Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.859155 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-utilities" (OuterVolumeSpecName: "utilities") pod "932196cf-13a0-47eb-ac1e-84011a7a41cd" (UID: "932196cf-13a0-47eb-ac1e-84011a7a41cd"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.859988 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.869112 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932196cf-13a0-47eb-ac1e-84011a7a41cd-kube-api-access-l4xpr" (OuterVolumeSpecName: "kube-api-access-l4xpr") pod "932196cf-13a0-47eb-ac1e-84011a7a41cd" (UID: "932196cf-13a0-47eb-ac1e-84011a7a41cd"). InnerVolumeSpecName "kube-api-access-l4xpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.881038 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "932196cf-13a0-47eb-ac1e-84011a7a41cd" (UID: "932196cf-13a0-47eb-ac1e-84011a7a41cd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.963273 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/932196cf-13a0-47eb-ac1e-84011a7a41cd-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:02:32 crc kubenswrapper[4900]: I0127 13:02:32.963572 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4xpr\" (UniqueName: \"kubernetes.io/projected/932196cf-13a0-47eb-ac1e-84011a7a41cd-kube-api-access-l4xpr\") on node \"crc\" DevicePath \"\"" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.137546 4900 generic.go:334] "Generic (PLEG): container finished" podID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerID="babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848" exitCode=0 Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.137646 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kb2ml" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.137626 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kb2ml" event={"ID":"932196cf-13a0-47eb-ac1e-84011a7a41cd","Type":"ContainerDied","Data":"babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848"} Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.137795 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kb2ml" event={"ID":"932196cf-13a0-47eb-ac1e-84011a7a41cd","Type":"ContainerDied","Data":"629abafd499f14bd3680d6444aeabea725924fdc592d25e5a114496bb1433343"} Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.137821 4900 scope.go:117] "RemoveContainer" containerID="babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.214950 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kb2ml"] Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.240757 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kb2ml"] Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.249282 4900 scope.go:117] "RemoveContainer" containerID="2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.268942 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.311253 4900 scope.go:117] "RemoveContainer" containerID="263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.378488 4900 scope.go:117] "RemoveContainer" containerID="babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848" Jan 27 13:02:33 crc kubenswrapper[4900]: E0127 13:02:33.379870 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848\": container with ID starting with babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848 not found: ID does not exist" containerID="babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.379932 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848"} err="failed to get container status \"babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848\": rpc error: code = NotFound desc = could not find container \"babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848\": container with ID starting with babd6434cb32a75ca60728c5a2ac47a712f28aa5029a813038b3fd68348ae848 not found: ID does not exist" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.379964 4900 scope.go:117] "RemoveContainer" containerID="2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6" Jan 27 13:02:33 crc kubenswrapper[4900]: E0127 13:02:33.381016 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6\": container with ID starting with 
2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6 not found: ID does not exist" containerID="2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.381218 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6"} err="failed to get container status \"2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6\": rpc error: code = NotFound desc = could not find container \"2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6\": container with ID starting with 2f44b5386b759297d26c56953f88a387c65eaa011e078ed7cb918d8bb8eecbc6 not found: ID does not exist" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.381326 4900 scope.go:117] "RemoveContainer" containerID="263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a" Jan 27 13:02:33 crc kubenswrapper[4900]: E0127 13:02:33.381785 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a\": container with ID starting with 263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a not found: ID does not exist" containerID="263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a" Jan 27 13:02:33 crc kubenswrapper[4900]: I0127 13:02:33.381817 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a"} err="failed to get container status \"263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a\": rpc error: code = NotFound desc = could not find container \"263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a\": container with ID starting with 263ccbe63fd7196569f13873a8db44c7b48c7963b530f863807245b78f9f8e1a not found: ID does not exist" Jan 27 13:02:34 crc kubenswrapper[4900]: I0127 13:02:34.406326 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6t9jw"] Jan 27 13:02:34 crc kubenswrapper[4900]: I0127 13:02:34.512216 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" path="/var/lib/kubelet/pods/932196cf-13a0-47eb-ac1e-84011a7a41cd/volumes" Jan 27 13:02:36 crc kubenswrapper[4900]: I0127 13:02:36.179226 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6t9jw" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerName="registry-server" containerID="cri-o://b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b" gracePeriod=2 Jan 27 13:02:36 crc kubenswrapper[4900]: I0127 13:02:36.749723 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:36 crc kubenswrapper[4900]: I0127 13:02:36.911548 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-catalog-content\") pod \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " Jan 27 13:02:36 crc kubenswrapper[4900]: I0127 13:02:36.911626 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ckp4\" (UniqueName: \"kubernetes.io/projected/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-kube-api-access-9ckp4\") pod \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " Jan 27 13:02:36 crc kubenswrapper[4900]: I0127 13:02:36.911700 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-utilities\") pod \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\" (UID: \"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a\") " Jan 27 13:02:36 crc kubenswrapper[4900]: I0127 13:02:36.912490 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-utilities" (OuterVolumeSpecName: "utilities") pod "d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" (UID: "d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:02:36 crc kubenswrapper[4900]: I0127 13:02:36.917651 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-kube-api-access-9ckp4" (OuterVolumeSpecName: "kube-api-access-9ckp4") pod "d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" (UID: "d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a"). InnerVolumeSpecName "kube-api-access-9ckp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:02:36 crc kubenswrapper[4900]: I0127 13:02:36.968320 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" (UID: "d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.014848 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.014885 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ckp4\" (UniqueName: \"kubernetes.io/projected/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-kube-api-access-9ckp4\") on node \"crc\" DevicePath \"\"" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.014909 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.193158 4900 generic.go:334] "Generic (PLEG): container finished" podID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerID="b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b" exitCode=0 Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.193213 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6t9jw" event={"ID":"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a","Type":"ContainerDied","Data":"b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b"} Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.193243 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6t9jw" event={"ID":"d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a","Type":"ContainerDied","Data":"b539187b125982bd1122f661c8abac2401cec383ed571a38a6d4bb7b29d297fd"} Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.193261 4900 scope.go:117] "RemoveContainer" containerID="b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.193273 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6t9jw" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.223388 4900 scope.go:117] "RemoveContainer" containerID="79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.243246 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6t9jw"] Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.263968 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6t9jw"] Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.279754 4900 scope.go:117] "RemoveContainer" containerID="17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.319553 4900 scope.go:117] "RemoveContainer" containerID="b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b" Jan 27 13:02:37 crc kubenswrapper[4900]: E0127 13:02:37.320067 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b\": container with ID starting with b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b not found: ID does not exist" containerID="b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.320109 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b"} err="failed to get container status \"b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b\": rpc error: code = NotFound desc = could not find container \"b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b\": container with ID starting with b8c459a5027c0d2733e2f97ba59abf2c25051de7f2f95429279c25763355ef3b not found: ID does not exist" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.320162 4900 scope.go:117] "RemoveContainer" containerID="79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1" Jan 27 13:02:37 crc kubenswrapper[4900]: E0127 13:02:37.320476 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1\": container with ID starting with 79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1 not found: ID does not exist" containerID="79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.320514 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1"} err="failed to get container status \"79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1\": rpc error: code = NotFound desc = could not find container \"79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1\": container with ID starting with 79eb9150f63b1f1195bb9275bc58a8ecb8a7764d0c115dc50a39130a112e8ef1 not found: ID does not exist" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.320543 4900 scope.go:117] "RemoveContainer" containerID="17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3" Jan 27 13:02:37 crc kubenswrapper[4900]: E0127 13:02:37.320841 4900 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3\": container with ID starting with 17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3 not found: ID does not exist" containerID="17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3" Jan 27 13:02:37 crc kubenswrapper[4900]: I0127 13:02:37.320881 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3"} err="failed to get container status \"17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3\": rpc error: code = NotFound desc = could not find container \"17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3\": container with ID starting with 17a5153b6be27807f5a536a82b701da89408ecb4c0beadfdb1c307ce648108c3 not found: ID does not exist" Jan 27 13:02:38 crc kubenswrapper[4900]: I0127 13:02:38.498466 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" path="/var/lib/kubelet/pods/d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a/volumes" Jan 27 13:02:38 crc kubenswrapper[4900]: I0127 13:02:38.807641 4900 scope.go:117] "RemoveContainer" containerID="0532c7a538ec809066d2d2fdc38769e51f4471fad40e2ed23c77bd481b51d99f" Jan 27 13:02:38 crc kubenswrapper[4900]: I0127 13:02:38.843938 4900 scope.go:117] "RemoveContainer" containerID="217dbd43523938257b0b4a51576f4dcb1e6b4682f707f56b660a481b1afa3131" Jan 27 13:03:21 crc kubenswrapper[4900]: I0127 13:03:21.721278 4900 generic.go:334] "Generic (PLEG): container finished" podID="c74db364-6bed-43d8-988a-ed979e4827cf" containerID="dd0ac3c28600323358b447a19cff0d405d6d1f64b22dab33c184538d7c2b5244" exitCode=0 Jan 27 13:03:21 crc kubenswrapper[4900]: I0127 13:03:21.721582 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" event={"ID":"c74db364-6bed-43d8-988a-ed979e4827cf","Type":"ContainerDied","Data":"dd0ac3c28600323358b447a19cff0d405d6d1f64b22dab33c184538d7c2b5244"} Jan 27 13:03:22 crc kubenswrapper[4900]: I0127 13:03:22.372768 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:03:22 crc kubenswrapper[4900]: I0127 13:03:22.373157 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.286181 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.297772 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvs2h\" (UniqueName: \"kubernetes.io/projected/c74db364-6bed-43d8-988a-ed979e4827cf-kube-api-access-rvs2h\") pod \"c74db364-6bed-43d8-988a-ed979e4827cf\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.297983 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-inventory\") pod \"c74db364-6bed-43d8-988a-ed979e4827cf\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.298019 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-ssh-key-openstack-edpm-ipam\") pod \"c74db364-6bed-43d8-988a-ed979e4827cf\" (UID: \"c74db364-6bed-43d8-988a-ed979e4827cf\") " Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.342791 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c74db364-6bed-43d8-988a-ed979e4827cf-kube-api-access-rvs2h" (OuterVolumeSpecName: "kube-api-access-rvs2h") pod "c74db364-6bed-43d8-988a-ed979e4827cf" (UID: "c74db364-6bed-43d8-988a-ed979e4827cf"). InnerVolumeSpecName "kube-api-access-rvs2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.389419 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-inventory" (OuterVolumeSpecName: "inventory") pod "c74db364-6bed-43d8-988a-ed979e4827cf" (UID: "c74db364-6bed-43d8-988a-ed979e4827cf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.396316 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "c74db364-6bed-43d8-988a-ed979e4827cf" (UID: "c74db364-6bed-43d8-988a-ed979e4827cf"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.405928 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvs2h\" (UniqueName: \"kubernetes.io/projected/c74db364-6bed-43d8-988a-ed979e4827cf-kube-api-access-rvs2h\") on node \"crc\" DevicePath \"\"" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.405963 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.405976 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c74db364-6bed-43d8-988a-ed979e4827cf-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.774397 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" event={"ID":"c74db364-6bed-43d8-988a-ed979e4827cf","Type":"ContainerDied","Data":"9cef0d8aecc38e245c5c0ef1051bb84ca315a62ab1311ea6bb40d263b5070d9e"} Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.774461 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9cef0d8aecc38e245c5c0ef1051bb84ca315a62ab1311ea6bb40d263b5070d9e" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.774530 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xssdp" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.858265 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr"] Jan 27 13:03:23 crc kubenswrapper[4900]: E0127 13:03:23.858995 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerName="extract-utilities" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859014 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerName="extract-utilities" Jan 27 13:03:23 crc kubenswrapper[4900]: E0127 13:03:23.859075 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerName="registry-server" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859083 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerName="registry-server" Jan 27 13:03:23 crc kubenswrapper[4900]: E0127 13:03:23.859099 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerName="registry-server" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859187 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerName="registry-server" Jan 27 13:03:23 crc kubenswrapper[4900]: E0127 13:03:23.859206 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c74db364-6bed-43d8-988a-ed979e4827cf" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859212 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c74db364-6bed-43d8-988a-ed979e4827cf" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 27 13:03:23 crc kubenswrapper[4900]: 
E0127 13:03:23.859232 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerName="extract-utilities" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859238 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerName="extract-utilities" Jan 27 13:03:23 crc kubenswrapper[4900]: E0127 13:03:23.859260 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerName="extract-content" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859266 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerName="extract-content" Jan 27 13:03:23 crc kubenswrapper[4900]: E0127 13:03:23.859280 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerName="extract-content" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859286 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerName="extract-content" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859541 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="932196cf-13a0-47eb-ac1e-84011a7a41cd" containerName="registry-server" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859566 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d02c5394-d0c4-4bd3-9828-fbf8bbe83c7a" containerName="registry-server" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.859577 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="c74db364-6bed-43d8-988a-ed979e4827cf" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.860865 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.864460 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.864782 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.865025 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.865225 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.887261 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr"] Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.920353 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.920496 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:23 crc kubenswrapper[4900]: I0127 13:03:23.920539 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25vqs\" (UniqueName: \"kubernetes.io/projected/2b37daec-8bf1-4131-aaec-f88c604eb143-kube-api-access-25vqs\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.023643 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.023740 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.023775 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25vqs\" (UniqueName: 
\"kubernetes.io/projected/2b37daec-8bf1-4131-aaec-f88c604eb143-kube-api-access-25vqs\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.029812 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.032322 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.055321 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25vqs\" (UniqueName: \"kubernetes.io/projected/2b37daec-8bf1-4131-aaec-f88c604eb143-kube-api-access-25vqs\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.065034 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-a1f3-account-create-update-7rzqb"] Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.078073 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-a1f3-account-create-update-7rzqb"] Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.196665 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.509438 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdcd763e-2447-4a8f-a234-223e5c09708d" path="/var/lib/kubelet/pods/bdcd763e-2447-4a8f-a234-223e5c09708d/volumes" Jan 27 13:03:24 crc kubenswrapper[4900]: I0127 13:03:24.827222 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.040090 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-9113-account-create-update-gtf6n"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.057288 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-kpqlh"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.074998 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-l4tgz"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.089025 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-9113-account-create-update-gtf6n"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.099716 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-0076-account-create-update-jz6pf"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.113133 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-kpqlh"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.124626 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-qlwsf"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.136543 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-0076-account-create-update-jz6pf"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.147252 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-qlwsf"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.163791 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-l4tgz"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.369349 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-656k2"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.372963 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.385861 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-656k2"] Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.487111 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-catalog-content\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.487219 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-utilities\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.487501 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsckz\" (UniqueName: \"kubernetes.io/projected/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-kube-api-access-jsckz\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.590153 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-catalog-content\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.590691 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-utilities\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.590746 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-catalog-content\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.591039 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsckz\" (UniqueName: \"kubernetes.io/projected/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-kube-api-access-jsckz\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.591298 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-utilities\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.613599 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jsckz\" (UniqueName: \"kubernetes.io/projected/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-kube-api-access-jsckz\") pod \"certified-operators-656k2\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.754622 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.944348 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" event={"ID":"2b37daec-8bf1-4131-aaec-f88c604eb143","Type":"ContainerStarted","Data":"67bf038722add64bce4e74c1a1ebf5d94a9b3a18300201d827cb7b3b8cc4c021"} Jan 27 13:03:25 crc kubenswrapper[4900]: I0127 13:03:25.990353 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" podStartSLOduration=2.32678624 podStartE2EDuration="2.990327194s" podCreationTimestamp="2026-01-27 13:03:23 +0000 UTC" firstStartedPulling="2026-01-27 13:03:24.836450879 +0000 UTC m=+2232.073479089" lastFinishedPulling="2026-01-27 13:03:25.499991833 +0000 UTC m=+2232.737020043" observedRunningTime="2026-01-27 13:03:25.976028689 +0000 UTC m=+2233.213056899" watchObservedRunningTime="2026-01-27 13:03:25.990327194 +0000 UTC m=+2233.227355414" Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.498945 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="275a0058-147e-43c0-b109-5d036591eb61" path="/var/lib/kubelet/pods/275a0058-147e-43c0-b109-5d036591eb61/volumes" Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.501737 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c80a52f-5539-4e14-9953-63525b30928d" path="/var/lib/kubelet/pods/9c80a52f-5539-4e14-9953-63525b30928d/volumes" Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.503031 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1f4bb23-786a-41d3-9b04-9e2cfba9d467" path="/var/lib/kubelet/pods/c1f4bb23-786a-41d3-9b04-9e2cfba9d467/volumes" Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.504419 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca4e1441-8211-48f4-9c31-b0c519574afa" path="/var/lib/kubelet/pods/ca4e1441-8211-48f4-9c31-b0c519574afa/volumes" Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.556148 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db4b3937-5f67-437e-a73e-45ef279cd10b" path="/var/lib/kubelet/pods/db4b3937-5f67-437e-a73e-45ef279cd10b/volumes" Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.613457 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-656k2"] Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.959258 4900 generic.go:334] "Generic (PLEG): container finished" podID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerID="46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9" exitCode=0 Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.959397 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-656k2" event={"ID":"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8","Type":"ContainerDied","Data":"46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9"} Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 
13:03:26.959489 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-656k2" event={"ID":"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8","Type":"ContainerStarted","Data":"f1bb785eae6a0cf6b4847ae6effc9cef318dde28855a7fa6db0ac9dc72d5b03a"} Jan 27 13:03:26 crc kubenswrapper[4900]: I0127 13:03:26.963232 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" event={"ID":"2b37daec-8bf1-4131-aaec-f88c604eb143","Type":"ContainerStarted","Data":"f973f8a728f36f2d706f1f8aa9264a9e9e73b3a9b6725bbbddc364fa96d0dde8"} Jan 27 13:03:27 crc kubenswrapper[4900]: I0127 13:03:27.975430 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-656k2" event={"ID":"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8","Type":"ContainerStarted","Data":"144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892"} Jan 27 13:03:30 crc kubenswrapper[4900]: I0127 13:03:30.005848 4900 generic.go:334] "Generic (PLEG): container finished" podID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerID="144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892" exitCode=0 Jan 27 13:03:30 crc kubenswrapper[4900]: I0127 13:03:30.005975 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-656k2" event={"ID":"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8","Type":"ContainerDied","Data":"144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892"} Jan 27 13:03:31 crc kubenswrapper[4900]: I0127 13:03:31.018546 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-656k2" event={"ID":"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8","Type":"ContainerStarted","Data":"51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b"} Jan 27 13:03:31 crc kubenswrapper[4900]: I0127 13:03:31.047044 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-656k2" podStartSLOduration=2.481884532 podStartE2EDuration="6.047018596s" podCreationTimestamp="2026-01-27 13:03:25 +0000 UTC" firstStartedPulling="2026-01-27 13:03:26.962809238 +0000 UTC m=+2234.199837448" lastFinishedPulling="2026-01-27 13:03:30.527943302 +0000 UTC m=+2237.764971512" observedRunningTime="2026-01-27 13:03:31.039068485 +0000 UTC m=+2238.276096695" watchObservedRunningTime="2026-01-27 13:03:31.047018596 +0000 UTC m=+2238.284046806" Jan 27 13:03:35 crc kubenswrapper[4900]: I0127 13:03:35.755567 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:35 crc kubenswrapper[4900]: I0127 13:03:35.756166 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:35 crc kubenswrapper[4900]: I0127 13:03:35.805820 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:36 crc kubenswrapper[4900]: I0127 13:03:36.129160 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:36 crc kubenswrapper[4900]: I0127 13:03:36.189446 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-656k2"] Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.097163 4900 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-656k2" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerName="registry-server" containerID="cri-o://51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b" gracePeriod=2 Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.637977 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.668429 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-catalog-content\") pod \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.668609 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-utilities\") pod \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.668737 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsckz\" (UniqueName: \"kubernetes.io/projected/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-kube-api-access-jsckz\") pod \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\" (UID: \"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8\") " Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.669721 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-utilities" (OuterVolumeSpecName: "utilities") pod "f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" (UID: "f6526b1e-3ebf-4fd5-8a11-b5894d01bba8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.674741 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-kube-api-access-jsckz" (OuterVolumeSpecName: "kube-api-access-jsckz") pod "f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" (UID: "f6526b1e-3ebf-4fd5-8a11-b5894d01bba8"). InnerVolumeSpecName "kube-api-access-jsckz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.737597 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" (UID: "f6526b1e-3ebf-4fd5-8a11-b5894d01bba8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.773806 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.773847 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:03:38 crc kubenswrapper[4900]: I0127 13:03:38.773861 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsckz\" (UniqueName: \"kubernetes.io/projected/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8-kube-api-access-jsckz\") on node \"crc\" DevicePath \"\"" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.012546 4900 scope.go:117] "RemoveContainer" containerID="64c8085e10dd82ad7e63f992b6e0dbd1401d00a1203c97c4b3e60850e098f82d" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.047495 4900 scope.go:117] "RemoveContainer" containerID="d3b64d582fdd7fb41976581774db0587dea47803fe8bfa1cbba673c4c4bc838c" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.123267 4900 generic.go:334] "Generic (PLEG): container finished" podID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerID="51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b" exitCode=0 Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.123373 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-656k2" event={"ID":"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8","Type":"ContainerDied","Data":"51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b"} Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.123431 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-656k2" event={"ID":"f6526b1e-3ebf-4fd5-8a11-b5894d01bba8","Type":"ContainerDied","Data":"f1bb785eae6a0cf6b4847ae6effc9cef318dde28855a7fa6db0ac9dc72d5b03a"} Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.123461 4900 scope.go:117] "RemoveContainer" containerID="51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.123694 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-656k2" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.129528 4900 scope.go:117] "RemoveContainer" containerID="651b848533f1c2f5ba8404b692194682878ead8131eaed347c67a4309726163a" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.155891 4900 scope.go:117] "RemoveContainer" containerID="144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.165939 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-656k2"] Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.188540 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-656k2"] Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.205144 4900 scope.go:117] "RemoveContainer" containerID="07b28eaa38a70f43bd7c7d41b01065088402b9fc76c4d44b801511519d0f9031" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.233807 4900 scope.go:117] "RemoveContainer" containerID="46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.252657 4900 scope.go:117] "RemoveContainer" containerID="9afc15016566d8161257b2a05b5224b48bb4374132701783236bc7ec289f9ba0" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.359255 4900 scope.go:117] "RemoveContainer" containerID="2145cd4eb32f67a2363145b3bb8236bbfe717d049018e3d42f90de5619b713b2" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.371117 4900 scope.go:117] "RemoveContainer" containerID="51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b" Jan 27 13:03:39 crc kubenswrapper[4900]: E0127 13:03:39.371625 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b\": container with ID starting with 51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b not found: ID does not exist" containerID="51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.371657 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b"} err="failed to get container status \"51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b\": rpc error: code = NotFound desc = could not find container \"51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b\": container with ID starting with 51382772db13331ef0ecf6bf4cae9f38c78b37a36af2df1e7a887ddc6d20391b not found: ID does not exist" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.371681 4900 scope.go:117] "RemoveContainer" containerID="144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892" Jan 27 13:03:39 crc kubenswrapper[4900]: E0127 13:03:39.372043 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892\": container with ID starting with 144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892 not found: ID does not exist" containerID="144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.372090 4900 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892"} err="failed to get container status \"144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892\": rpc error: code = NotFound desc = could not find container \"144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892\": container with ID starting with 144ec43563b9ee7268012c5b99b52f525fb71a1e90c98d7ba59995d6c5276892 not found: ID does not exist" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.372112 4900 scope.go:117] "RemoveContainer" containerID="46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9" Jan 27 13:03:39 crc kubenswrapper[4900]: E0127 13:03:39.372364 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9\": container with ID starting with 46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9 not found: ID does not exist" containerID="46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9" Jan 27 13:03:39 crc kubenswrapper[4900]: I0127 13:03:39.372389 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9"} err="failed to get container status \"46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9\": rpc error: code = NotFound desc = could not find container \"46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9\": container with ID starting with 46cef595cb8c89ad8d0788314509368bfcb57acaba0885cafb743c73266514e9 not found: ID does not exist" Jan 27 13:03:40 crc kubenswrapper[4900]: I0127 13:03:40.499742 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" path="/var/lib/kubelet/pods/f6526b1e-3ebf-4fd5-8a11-b5894d01bba8/volumes" Jan 27 13:03:45 crc kubenswrapper[4900]: I0127 13:03:45.516263 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 13:03:45 crc kubenswrapper[4900]: I0127 13:03:45.516966 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 13:03:50 crc kubenswrapper[4900]: I0127 13:03:50.055651 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-cj2cd"] Jan 27 13:03:50 crc kubenswrapper[4900]: I0127 13:03:50.067808 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-955f-account-create-update-gvjq7"] Jan 27 13:03:50 crc kubenswrapper[4900]: I0127 13:03:50.082349 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-955f-account-create-update-gvjq7"] Jan 27 13:03:50 crc kubenswrapper[4900]: I0127 13:03:50.093083 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-cj2cd"] Jan 27 13:03:50 crc kubenswrapper[4900]: I0127 13:03:50.500264 4900 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="65191a52-e21a-4953-acb8-1e524a4bcfa7" path="/var/lib/kubelet/pods/65191a52-e21a-4953-acb8-1e524a4bcfa7/volumes" Jan 27 13:03:50 crc kubenswrapper[4900]: I0127 13:03:50.504081 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74aca284-ea33-4059-9175-c32b2bee89dc" path="/var/lib/kubelet/pods/74aca284-ea33-4059-9175-c32b2bee89dc/volumes" Jan 27 13:03:52 crc kubenswrapper[4900]: I0127 13:03:52.372590 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:03:52 crc kubenswrapper[4900]: I0127 13:03:52.372931 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:03:59 crc kubenswrapper[4900]: I0127 13:03:59.050572 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-hj679"] Jan 27 13:03:59 crc kubenswrapper[4900]: I0127 13:03:59.072505 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-hj679"] Jan 27 13:04:00 crc kubenswrapper[4900]: I0127 13:04:00.500740 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7007f6f8-047f-404d-9094-1ba8d95238a6" path="/var/lib/kubelet/pods/7007f6f8-047f-404d-9094-1ba8d95238a6/volumes" Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.372793 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.373421 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.373490 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.374759 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.374837 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" gracePeriod=600 Jan 27 13:04:22 crc kubenswrapper[4900]: 
E0127 13:04:22.576988 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.785685 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" exitCode=0 Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.785745 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e"} Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.785788 4900 scope.go:117] "RemoveContainer" containerID="14bb46af03aa708f5b56f9d73717659956f8ecfa5ffc85cf276e4cc701eaafcb" Jan 27 13:04:22 crc kubenswrapper[4900]: I0127 13:04:22.786847 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:04:22 crc kubenswrapper[4900]: E0127 13:04:22.787333 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:04:30 crc kubenswrapper[4900]: I0127 13:04:30.068680 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-tp8r6"] Jan 27 13:04:30 crc kubenswrapper[4900]: I0127 13:04:30.126188 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-tp8r6"] Jan 27 13:04:30 crc kubenswrapper[4900]: I0127 13:04:30.501020 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eff943df-8066-483c-a0c7-0fb3f7346380" path="/var/lib/kubelet/pods/eff943df-8066-483c-a0c7-0fb3f7346380/volumes" Jan 27 13:04:32 crc kubenswrapper[4900]: I0127 13:04:32.907990 4900 generic.go:334] "Generic (PLEG): container finished" podID="2b37daec-8bf1-4131-aaec-f88c604eb143" containerID="f973f8a728f36f2d706f1f8aa9264a9e9e73b3a9b6725bbbddc364fa96d0dde8" exitCode=0 Jan 27 13:04:32 crc kubenswrapper[4900]: I0127 13:04:32.908093 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" event={"ID":"2b37daec-8bf1-4131-aaec-f88c604eb143","Type":"ContainerDied","Data":"f973f8a728f36f2d706f1f8aa9264a9e9e73b3a9b6725bbbddc364fa96d0dde8"} Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.483385 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:04:35 crc kubenswrapper[4900]: E0127 13:04:34.484265 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.526180 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.635006 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-inventory\") pod \"2b37daec-8bf1-4131-aaec-f88c604eb143\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.635110 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25vqs\" (UniqueName: \"kubernetes.io/projected/2b37daec-8bf1-4131-aaec-f88c604eb143-kube-api-access-25vqs\") pod \"2b37daec-8bf1-4131-aaec-f88c604eb143\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.635373 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-ssh-key-openstack-edpm-ipam\") pod \"2b37daec-8bf1-4131-aaec-f88c604eb143\" (UID: \"2b37daec-8bf1-4131-aaec-f88c604eb143\") " Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.655472 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b37daec-8bf1-4131-aaec-f88c604eb143-kube-api-access-25vqs" (OuterVolumeSpecName: "kube-api-access-25vqs") pod "2b37daec-8bf1-4131-aaec-f88c604eb143" (UID: "2b37daec-8bf1-4131-aaec-f88c604eb143"). InnerVolumeSpecName "kube-api-access-25vqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.738183 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-inventory" (OuterVolumeSpecName: "inventory") pod "2b37daec-8bf1-4131-aaec-f88c604eb143" (UID: "2b37daec-8bf1-4131-aaec-f88c604eb143"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.742234 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.742263 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25vqs\" (UniqueName: \"kubernetes.io/projected/2b37daec-8bf1-4131-aaec-f88c604eb143-kube-api-access-25vqs\") on node \"crc\" DevicePath \"\"" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.774699 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "2b37daec-8bf1-4131-aaec-f88c604eb143" (UID: "2b37daec-8bf1-4131-aaec-f88c604eb143"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.845422 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b37daec-8bf1-4131-aaec-f88c604eb143-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.957574 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" event={"ID":"2b37daec-8bf1-4131-aaec-f88c604eb143","Type":"ContainerDied","Data":"67bf038722add64bce4e74c1a1ebf5d94a9b3a18300201d827cb7b3b8cc4c021"} Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.957648 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67bf038722add64bce4e74c1a1ebf5d94a9b3a18300201d827cb7b3b8cc4c021" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:34.957657 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.075626 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-cjbb2"] Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.103178 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-cjbb2"] Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.118602 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h"] Jan 27 13:04:35 crc kubenswrapper[4900]: E0127 13:04:35.119812 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b37daec-8bf1-4131-aaec-f88c604eb143" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.119850 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b37daec-8bf1-4131-aaec-f88c604eb143" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 27 13:04:35 crc kubenswrapper[4900]: E0127 13:04:35.119866 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerName="registry-server" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.119875 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerName="registry-server" Jan 27 13:04:35 crc kubenswrapper[4900]: E0127 13:04:35.119912 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerName="extract-utilities" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.119921 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerName="extract-utilities" Jan 27 13:04:35 crc kubenswrapper[4900]: E0127 13:04:35.119938 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerName="extract-content" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.119948 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerName="extract-content" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.120359 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b37daec-8bf1-4131-aaec-f88c604eb143" 
containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.120413 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6526b1e-3ebf-4fd5-8a11-b5894d01bba8" containerName="registry-server" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.122079 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.125314 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.126203 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.126560 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.126681 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.142480 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h"] Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.257340 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk6ct\" (UniqueName: \"kubernetes.io/projected/8d42f237-ff22-4ec0-9897-1da911518528-kube-api-access-hk6ct\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.257421 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.257912 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.360606 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.360792 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk6ct\" (UniqueName: \"kubernetes.io/projected/8d42f237-ff22-4ec0-9897-1da911518528-kube-api-access-hk6ct\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.360858 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.365521 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.372743 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.381778 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk6ct\" (UniqueName: \"kubernetes.io/projected/8d42f237-ff22-4ec0-9897-1da911518528-kube-api-access-hk6ct\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:35 crc kubenswrapper[4900]: I0127 13:04:35.443942 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:36 crc kubenswrapper[4900]: I0127 13:04:36.069434 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h"] Jan 27 13:04:36 crc kubenswrapper[4900]: I0127 13:04:36.078778 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 13:04:36 crc kubenswrapper[4900]: I0127 13:04:36.497720 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0c2f998-53fe-4850-a061-6f275bb39313" path="/var/lib/kubelet/pods/c0c2f998-53fe-4850-a061-6f275bb39313/volumes" Jan 27 13:04:36 crc kubenswrapper[4900]: I0127 13:04:36.984852 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" event={"ID":"8d42f237-ff22-4ec0-9897-1da911518528","Type":"ContainerStarted","Data":"4b473db96b98319c78e79cb4b4922a2592c31cd8742a12e3d25b572c59f3bfe9"} Jan 27 13:04:36 crc kubenswrapper[4900]: I0127 13:04:36.985288 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" event={"ID":"8d42f237-ff22-4ec0-9897-1da911518528","Type":"ContainerStarted","Data":"9c81009e5d274c1f91bd00d40b2495cdd0738e8487fe4d22dcaeac026f762131"} Jan 27 13:04:37 crc kubenswrapper[4900]: I0127 13:04:37.009471 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" podStartSLOduration=1.489688725 podStartE2EDuration="2.009451009s" podCreationTimestamp="2026-01-27 13:04:35 +0000 UTC" firstStartedPulling="2026-01-27 13:04:36.078361515 +0000 UTC m=+2303.315389735" lastFinishedPulling="2026-01-27 13:04:36.598123809 +0000 UTC m=+2303.835152019" observedRunningTime="2026-01-27 13:04:37.008427689 +0000 UTC m=+2304.245455899" watchObservedRunningTime="2026-01-27 13:04:37.009451009 +0000 UTC m=+2304.246479219" Jan 27 13:04:39 crc kubenswrapper[4900]: I0127 13:04:39.681750 4900 scope.go:117] "RemoveContainer" containerID="e7008520d5f3b95fb7d0cdd1ee037fd301dba078b9179618ff5b319557f9a286" Jan 27 13:04:39 crc kubenswrapper[4900]: I0127 13:04:39.720445 4900 scope.go:117] "RemoveContainer" containerID="a6f20a452a2479b3b0a8ab5d857f98014bff4d463aea096cf23ede2b3b90a24f" Jan 27 13:04:39 crc kubenswrapper[4900]: I0127 13:04:39.818470 4900 scope.go:117] "RemoveContainer" containerID="dd17466485a03db1dd63a32a13720b1520909966fe30dc93ac38c95b37286c6d" Jan 27 13:04:39 crc kubenswrapper[4900]: I0127 13:04:39.852238 4900 scope.go:117] "RemoveContainer" containerID="8d1022e1c5663d3c9ead4b366419850ad65cbe2406946b46a003d9845f0e9fb4" Jan 27 13:04:39 crc kubenswrapper[4900]: I0127 13:04:39.921617 4900 scope.go:117] "RemoveContainer" containerID="4de009bad480a9183448966c140c5d2899955bb954c2a0f683c730af714203ff" Jan 27 13:04:43 crc kubenswrapper[4900]: I0127 13:04:43.075914 4900 generic.go:334] "Generic (PLEG): container finished" podID="8d42f237-ff22-4ec0-9897-1da911518528" containerID="4b473db96b98319c78e79cb4b4922a2592c31cd8742a12e3d25b572c59f3bfe9" exitCode=0 Jan 27 13:04:43 crc kubenswrapper[4900]: I0127 13:04:43.076007 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" event={"ID":"8d42f237-ff22-4ec0-9897-1da911518528","Type":"ContainerDied","Data":"4b473db96b98319c78e79cb4b4922a2592c31cd8742a12e3d25b572c59f3bfe9"} Jan 27 
13:04:44 crc kubenswrapper[4900]: I0127 13:04:44.857225 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:44 crc kubenswrapper[4900]: I0127 13:04:44.984902 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-ssh-key-openstack-edpm-ipam\") pod \"8d42f237-ff22-4ec0-9897-1da911518528\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " Jan 27 13:04:44 crc kubenswrapper[4900]: I0127 13:04:44.984985 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-inventory\") pod \"8d42f237-ff22-4ec0-9897-1da911518528\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " Jan 27 13:04:44 crc kubenswrapper[4900]: I0127 13:04:44.985028 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk6ct\" (UniqueName: \"kubernetes.io/projected/8d42f237-ff22-4ec0-9897-1da911518528-kube-api-access-hk6ct\") pod \"8d42f237-ff22-4ec0-9897-1da911518528\" (UID: \"8d42f237-ff22-4ec0-9897-1da911518528\") " Jan 27 13:04:44 crc kubenswrapper[4900]: I0127 13:04:44.994557 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d42f237-ff22-4ec0-9897-1da911518528-kube-api-access-hk6ct" (OuterVolumeSpecName: "kube-api-access-hk6ct") pod "8d42f237-ff22-4ec0-9897-1da911518528" (UID: "8d42f237-ff22-4ec0-9897-1da911518528"). InnerVolumeSpecName "kube-api-access-hk6ct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.018239 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "8d42f237-ff22-4ec0-9897-1da911518528" (UID: "8d42f237-ff22-4ec0-9897-1da911518528"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.030559 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-inventory" (OuterVolumeSpecName: "inventory") pod "8d42f237-ff22-4ec0-9897-1da911518528" (UID: "8d42f237-ff22-4ec0-9897-1da911518528"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.088578 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.088613 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d42f237-ff22-4ec0-9897-1da911518528-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.088626 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk6ct\" (UniqueName: \"kubernetes.io/projected/8d42f237-ff22-4ec0-9897-1da911518528-kube-api-access-hk6ct\") on node \"crc\" DevicePath \"\"" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.099863 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" event={"ID":"8d42f237-ff22-4ec0-9897-1da911518528","Type":"ContainerDied","Data":"9c81009e5d274c1f91bd00d40b2495cdd0738e8487fe4d22dcaeac026f762131"} Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.099915 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c81009e5d274c1f91bd00d40b2495cdd0738e8487fe4d22dcaeac026f762131" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.099987 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.238464 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2"] Jan 27 13:04:45 crc kubenswrapper[4900]: E0127 13:04:45.239327 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d42f237-ff22-4ec0-9897-1da911518528" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.239353 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d42f237-ff22-4ec0-9897-1da911518528" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.239679 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d42f237-ff22-4ec0-9897-1da911518528" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.241031 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.244021 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.244054 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.244021 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.244556 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.278687 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2"] Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.396044 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.396124 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdzrt\" (UniqueName: \"kubernetes.io/projected/992f1195-0ec0-44ed-8d53-6c45b556956c-kube-api-access-hdzrt\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.396150 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.498822 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.499176 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdzrt\" (UniqueName: \"kubernetes.io/projected/992f1195-0ec0-44ed-8d53-6c45b556956c-kube-api-access-hdzrt\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.499204 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-ssh-key-openstack-edpm-ipam\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.502992 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.511029 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.516824 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdzrt\" (UniqueName: \"kubernetes.io/projected/992f1195-0ec0-44ed-8d53-6c45b556956c-kube-api-access-hdzrt\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-v8zx2\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:45 crc kubenswrapper[4900]: I0127 13:04:45.574875 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:04:46 crc kubenswrapper[4900]: I0127 13:04:46.162507 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2"] Jan 27 13:04:47 crc kubenswrapper[4900]: I0127 13:04:47.135230 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" event={"ID":"992f1195-0ec0-44ed-8d53-6c45b556956c","Type":"ContainerStarted","Data":"3cf2441f618ef72ad6d96ae55699a37ceb226b0590a16ed7bcd80d39049784e8"} Jan 27 13:04:47 crc kubenswrapper[4900]: I0127 13:04:47.135805 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" event={"ID":"992f1195-0ec0-44ed-8d53-6c45b556956c","Type":"ContainerStarted","Data":"f80de1285510939baea06102b8b7be47200ee14928b786c7aebd47884d9c8d11"} Jan 27 13:04:47 crc kubenswrapper[4900]: I0127 13:04:47.162877 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" podStartSLOduration=1.7217438409999999 podStartE2EDuration="2.162852234s" podCreationTimestamp="2026-01-27 13:04:45 +0000 UTC" firstStartedPulling="2026-01-27 13:04:46.181401051 +0000 UTC m=+2313.418429261" lastFinishedPulling="2026-01-27 13:04:46.622509454 +0000 UTC m=+2313.859537654" observedRunningTime="2026-01-27 13:04:47.151110864 +0000 UTC m=+2314.388139074" watchObservedRunningTime="2026-01-27 13:04:47.162852234 +0000 UTC m=+2314.399880444" Jan 27 13:04:48 crc kubenswrapper[4900]: I0127 13:04:48.483031 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:04:48 crc kubenswrapper[4900]: E0127 13:04:48.483720 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:05:02 crc kubenswrapper[4900]: I0127 13:05:02.481618 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:05:02 crc kubenswrapper[4900]: E0127 13:05:02.482550 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:05:13 crc kubenswrapper[4900]: I0127 13:05:13.482604 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:05:13 crc kubenswrapper[4900]: E0127 13:05:13.483544 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:05:14 crc kubenswrapper[4900]: I0127 13:05:14.051040 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-4rr45"] Jan 27 13:05:14 crc kubenswrapper[4900]: I0127 13:05:14.064163 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-4rr45"] Jan 27 13:05:14 crc kubenswrapper[4900]: I0127 13:05:14.498316 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5730e44a-3c94-46ba-8fd9-c659adcbfa31" path="/var/lib/kubelet/pods/5730e44a-3c94-46ba-8fd9-c659adcbfa31/volumes" Jan 27 13:05:26 crc kubenswrapper[4900]: I0127 13:05:26.494836 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:05:26 crc kubenswrapper[4900]: E0127 13:05:26.496593 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:05:26 crc kubenswrapper[4900]: I0127 13:05:26.685740 4900 generic.go:334] "Generic (PLEG): container finished" podID="992f1195-0ec0-44ed-8d53-6c45b556956c" containerID="3cf2441f618ef72ad6d96ae55699a37ceb226b0590a16ed7bcd80d39049784e8" exitCode=0 Jan 27 13:05:26 crc kubenswrapper[4900]: I0127 13:05:26.685834 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" event={"ID":"992f1195-0ec0-44ed-8d53-6c45b556956c","Type":"ContainerDied","Data":"3cf2441f618ef72ad6d96ae55699a37ceb226b0590a16ed7bcd80d39049784e8"} Jan 27 13:05:28 crc 
kubenswrapper[4900]: I0127 13:05:28.429090 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.562433 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-ssh-key-openstack-edpm-ipam\") pod \"992f1195-0ec0-44ed-8d53-6c45b556956c\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.562532 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdzrt\" (UniqueName: \"kubernetes.io/projected/992f1195-0ec0-44ed-8d53-6c45b556956c-kube-api-access-hdzrt\") pod \"992f1195-0ec0-44ed-8d53-6c45b556956c\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.562592 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-inventory\") pod \"992f1195-0ec0-44ed-8d53-6c45b556956c\" (UID: \"992f1195-0ec0-44ed-8d53-6c45b556956c\") " Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.603048 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/992f1195-0ec0-44ed-8d53-6c45b556956c-kube-api-access-hdzrt" (OuterVolumeSpecName: "kube-api-access-hdzrt") pod "992f1195-0ec0-44ed-8d53-6c45b556956c" (UID: "992f1195-0ec0-44ed-8d53-6c45b556956c"). InnerVolumeSpecName "kube-api-access-hdzrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.635298 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "992f1195-0ec0-44ed-8d53-6c45b556956c" (UID: "992f1195-0ec0-44ed-8d53-6c45b556956c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.649983 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-inventory" (OuterVolumeSpecName: "inventory") pod "992f1195-0ec0-44ed-8d53-6c45b556956c" (UID: "992f1195-0ec0-44ed-8d53-6c45b556956c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.667763 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdzrt\" (UniqueName: \"kubernetes.io/projected/992f1195-0ec0-44ed-8d53-6c45b556956c-kube-api-access-hdzrt\") on node \"crc\" DevicePath \"\"" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.667809 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.667822 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/992f1195-0ec0-44ed-8d53-6c45b556956c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.783480 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" event={"ID":"992f1195-0ec0-44ed-8d53-6c45b556956c","Type":"ContainerDied","Data":"f80de1285510939baea06102b8b7be47200ee14928b786c7aebd47884d9c8d11"} Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.783535 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f80de1285510939baea06102b8b7be47200ee14928b786c7aebd47884d9c8d11" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.783915 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-v8zx2" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.879683 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv"] Jan 27 13:05:28 crc kubenswrapper[4900]: E0127 13:05:28.880772 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="992f1195-0ec0-44ed-8d53-6c45b556956c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.880801 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="992f1195-0ec0-44ed-8d53-6c45b556956c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.881216 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="992f1195-0ec0-44ed-8d53-6c45b556956c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.882686 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.889239 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.889838 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.889932 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.890226 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.896244 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv"] Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.979209 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqg85\" (UniqueName: \"kubernetes.io/projected/2ee331bb-6045-48d3-b163-8fcffedf5a0f-kube-api-access-cqg85\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.980259 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:28 crc kubenswrapper[4900]: I0127 13:05:28.980526 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:29 crc kubenswrapper[4900]: I0127 13:05:29.083425 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqg85\" (UniqueName: \"kubernetes.io/projected/2ee331bb-6045-48d3-b163-8fcffedf5a0f-kube-api-access-cqg85\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:29 crc kubenswrapper[4900]: I0127 13:05:29.083560 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:29 crc kubenswrapper[4900]: I0127 13:05:29.083610 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:29 crc kubenswrapper[4900]: I0127 13:05:29.089393 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:29 crc kubenswrapper[4900]: I0127 13:05:29.102133 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:29 crc kubenswrapper[4900]: I0127 13:05:29.109646 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqg85\" (UniqueName: \"kubernetes.io/projected/2ee331bb-6045-48d3-b163-8fcffedf5a0f-kube-api-access-cqg85\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sjflv\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:29 crc kubenswrapper[4900]: I0127 13:05:29.211899 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:05:29 crc kubenswrapper[4900]: W0127 13:05:29.818482 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ee331bb_6045_48d3_b163_8fcffedf5a0f.slice/crio-424343f04c69528eb1d8973743bb96bc7e01ba622aa554875c7e49ccfb979a1f WatchSource:0}: Error finding container 424343f04c69528eb1d8973743bb96bc7e01ba622aa554875c7e49ccfb979a1f: Status 404 returned error can't find the container with id 424343f04c69528eb1d8973743bb96bc7e01ba622aa554875c7e49ccfb979a1f Jan 27 13:05:29 crc kubenswrapper[4900]: I0127 13:05:29.820427 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv"] Jan 27 13:05:30 crc kubenswrapper[4900]: I0127 13:05:30.808279 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" event={"ID":"2ee331bb-6045-48d3-b163-8fcffedf5a0f","Type":"ContainerStarted","Data":"82916f4957c573c516a7dcafa7ec8ed6ef90277c646f031528ffa8a84e9001b3"} Jan 27 13:05:30 crc kubenswrapper[4900]: I0127 13:05:30.808607 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" event={"ID":"2ee331bb-6045-48d3-b163-8fcffedf5a0f","Type":"ContainerStarted","Data":"424343f04c69528eb1d8973743bb96bc7e01ba622aa554875c7e49ccfb979a1f"} Jan 27 13:05:30 crc kubenswrapper[4900]: I0127 13:05:30.834312 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" podStartSLOduration=2.251308766 podStartE2EDuration="2.834286193s" podCreationTimestamp="2026-01-27 13:05:28 +0000 UTC" 
firstStartedPulling="2026-01-27 13:05:29.820917084 +0000 UTC m=+2357.057945294" lastFinishedPulling="2026-01-27 13:05:30.403894511 +0000 UTC m=+2357.640922721" observedRunningTime="2026-01-27 13:05:30.828254188 +0000 UTC m=+2358.065282408" watchObservedRunningTime="2026-01-27 13:05:30.834286193 +0000 UTC m=+2358.071314403" Jan 27 13:05:39 crc kubenswrapper[4900]: I0127 13:05:39.482873 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:05:39 crc kubenswrapper[4900]: E0127 13:05:39.484088 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:05:40 crc kubenswrapper[4900]: I0127 13:05:40.110695 4900 scope.go:117] "RemoveContainer" containerID="b878424c8310cb03da552e59b089977e9f157b5a3f749c70249626fc40aa3655" Jan 27 13:05:53 crc kubenswrapper[4900]: I0127 13:05:53.482260 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:05:53 crc kubenswrapper[4900]: E0127 13:05:53.483320 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:06:07 crc kubenswrapper[4900]: I0127 13:06:07.483026 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:06:07 crc kubenswrapper[4900]: E0127 13:06:07.484665 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:06:21 crc kubenswrapper[4900]: I0127 13:06:21.404574 4900 generic.go:334] "Generic (PLEG): container finished" podID="2ee331bb-6045-48d3-b163-8fcffedf5a0f" containerID="82916f4957c573c516a7dcafa7ec8ed6ef90277c646f031528ffa8a84e9001b3" exitCode=0 Jan 27 13:06:21 crc kubenswrapper[4900]: I0127 13:06:21.404611 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" event={"ID":"2ee331bb-6045-48d3-b163-8fcffedf5a0f","Type":"ContainerDied","Data":"82916f4957c573c516a7dcafa7ec8ed6ef90277c646f031528ffa8a84e9001b3"} Jan 27 13:06:21 crc kubenswrapper[4900]: I0127 13:06:21.482673 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:06:21 crc kubenswrapper[4900]: E0127 13:06:21.483236 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:06:22 crc kubenswrapper[4900]: I0127 13:06:22.923978 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.090830 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-ssh-key-openstack-edpm-ipam\") pod \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.090989 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-inventory\") pod \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.091266 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqg85\" (UniqueName: \"kubernetes.io/projected/2ee331bb-6045-48d3-b163-8fcffedf5a0f-kube-api-access-cqg85\") pod \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\" (UID: \"2ee331bb-6045-48d3-b163-8fcffedf5a0f\") " Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.103812 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ee331bb-6045-48d3-b163-8fcffedf5a0f-kube-api-access-cqg85" (OuterVolumeSpecName: "kube-api-access-cqg85") pod "2ee331bb-6045-48d3-b163-8fcffedf5a0f" (UID: "2ee331bb-6045-48d3-b163-8fcffedf5a0f"). InnerVolumeSpecName "kube-api-access-cqg85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.129932 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-inventory" (OuterVolumeSpecName: "inventory") pod "2ee331bb-6045-48d3-b163-8fcffedf5a0f" (UID: "2ee331bb-6045-48d3-b163-8fcffedf5a0f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.137888 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "2ee331bb-6045-48d3-b163-8fcffedf5a0f" (UID: "2ee331bb-6045-48d3-b163-8fcffedf5a0f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.196502 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqg85\" (UniqueName: \"kubernetes.io/projected/2ee331bb-6045-48d3-b163-8fcffedf5a0f-kube-api-access-cqg85\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.196548 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.196560 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2ee331bb-6045-48d3-b163-8fcffedf5a0f-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.432758 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" event={"ID":"2ee331bb-6045-48d3-b163-8fcffedf5a0f","Type":"ContainerDied","Data":"424343f04c69528eb1d8973743bb96bc7e01ba622aa554875c7e49ccfb979a1f"} Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.432826 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="424343f04c69528eb1d8973743bb96bc7e01ba622aa554875c7e49ccfb979a1f" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.432846 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sjflv" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.544387 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-26xlc"] Jan 27 13:06:23 crc kubenswrapper[4900]: E0127 13:06:23.545262 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ee331bb-6045-48d3-b163-8fcffedf5a0f" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.545287 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ee331bb-6045-48d3-b163-8fcffedf5a0f" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.545589 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ee331bb-6045-48d3-b163-8fcffedf5a0f" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.546536 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.550234 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.550460 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.550837 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.550860 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.559273 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-26xlc"] Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.712772 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.712964 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwz7m\" (UniqueName: \"kubernetes.io/projected/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-kube-api-access-jwz7m\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.713126 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.817403 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwz7m\" (UniqueName: \"kubernetes.io/projected/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-kube-api-access-jwz7m\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.817520 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.817876 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc 
kubenswrapper[4900]: I0127 13:06:23.822771 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.828404 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.861398 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwz7m\" (UniqueName: \"kubernetes.io/projected/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-kube-api-access-jwz7m\") pod \"ssh-known-hosts-edpm-deployment-26xlc\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:23 crc kubenswrapper[4900]: I0127 13:06:23.869300 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:24 crc kubenswrapper[4900]: I0127 13:06:24.595614 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-26xlc"] Jan 27 13:06:25 crc kubenswrapper[4900]: I0127 13:06:25.466512 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" event={"ID":"dc1dd1b1-c791-41c0-b30b-636aa3962cc4","Type":"ContainerStarted","Data":"51e1bcdda057b3c168e12898b0798b6d15631d3859e21b4eaf2047456690f451"} Jan 27 13:06:26 crc kubenswrapper[4900]: I0127 13:06:26.501587 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" event={"ID":"dc1dd1b1-c791-41c0-b30b-636aa3962cc4","Type":"ContainerStarted","Data":"39412bc9f2e25f3ebc66bc9b403392c44c9583c93cb4196d07983f1eef8793d1"} Jan 27 13:06:26 crc kubenswrapper[4900]: I0127 13:06:26.538167 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" podStartSLOduration=2.473697271 podStartE2EDuration="3.538133157s" podCreationTimestamp="2026-01-27 13:06:23 +0000 UTC" firstStartedPulling="2026-01-27 13:06:24.60094145 +0000 UTC m=+2411.837969670" lastFinishedPulling="2026-01-27 13:06:25.665377336 +0000 UTC m=+2412.902405556" observedRunningTime="2026-01-27 13:06:26.53068399 +0000 UTC m=+2413.767712200" watchObservedRunningTime="2026-01-27 13:06:26.538133157 +0000 UTC m=+2413.775161367" Jan 27 13:06:32 crc kubenswrapper[4900]: I0127 13:06:32.552913 4900 generic.go:334] "Generic (PLEG): container finished" podID="dc1dd1b1-c791-41c0-b30b-636aa3962cc4" containerID="39412bc9f2e25f3ebc66bc9b403392c44c9583c93cb4196d07983f1eef8793d1" exitCode=0 Jan 27 13:06:32 crc kubenswrapper[4900]: I0127 13:06:32.552989 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" event={"ID":"dc1dd1b1-c791-41c0-b30b-636aa3962cc4","Type":"ContainerDied","Data":"39412bc9f2e25f3ebc66bc9b403392c44c9583c93cb4196d07983f1eef8793d1"} Jan 27 13:06:33 crc kubenswrapper[4900]: I0127 13:06:33.482371 4900 scope.go:117] "RemoveContainer" 
containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:06:33 crc kubenswrapper[4900]: E0127 13:06:33.483008 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.057335 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.134164 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwz7m\" (UniqueName: \"kubernetes.io/projected/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-kube-api-access-jwz7m\") pod \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.134738 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-ssh-key-openstack-edpm-ipam\") pod \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.134916 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-inventory-0\") pod \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\" (UID: \"dc1dd1b1-c791-41c0-b30b-636aa3962cc4\") " Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.142571 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-kube-api-access-jwz7m" (OuterVolumeSpecName: "kube-api-access-jwz7m") pod "dc1dd1b1-c791-41c0-b30b-636aa3962cc4" (UID: "dc1dd1b1-c791-41c0-b30b-636aa3962cc4"). InnerVolumeSpecName "kube-api-access-jwz7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.172870 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "dc1dd1b1-c791-41c0-b30b-636aa3962cc4" (UID: "dc1dd1b1-c791-41c0-b30b-636aa3962cc4"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.178331 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "dc1dd1b1-c791-41c0-b30b-636aa3962cc4" (UID: "dc1dd1b1-c791-41c0-b30b-636aa3962cc4"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.239307 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.239355 4900 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.239369 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwz7m\" (UniqueName: \"kubernetes.io/projected/dc1dd1b1-c791-41c0-b30b-636aa3962cc4-kube-api-access-jwz7m\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.576832 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" event={"ID":"dc1dd1b1-c791-41c0-b30b-636aa3962cc4","Type":"ContainerDied","Data":"51e1bcdda057b3c168e12898b0798b6d15631d3859e21b4eaf2047456690f451"} Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.577109 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51e1bcdda057b3c168e12898b0798b6d15631d3859e21b4eaf2047456690f451" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.577124 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-26xlc" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.680588 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw"] Jan 27 13:06:34 crc kubenswrapper[4900]: E0127 13:06:34.681330 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc1dd1b1-c791-41c0-b30b-636aa3962cc4" containerName="ssh-known-hosts-edpm-deployment" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.681352 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc1dd1b1-c791-41c0-b30b-636aa3962cc4" containerName="ssh-known-hosts-edpm-deployment" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.681657 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc1dd1b1-c791-41c0-b30b-636aa3962cc4" containerName="ssh-known-hosts-edpm-deployment" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.682662 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.685024 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.685439 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.685671 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.685979 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.697524 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw"] Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.859727 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.859921 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29sch\" (UniqueName: \"kubernetes.io/projected/995aa162-c9a7-47ef-8ad2-6796321e6306-kube-api-access-29sch\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.859979 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.963607 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.963847 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29sch\" (UniqueName: \"kubernetes.io/projected/995aa162-c9a7-47ef-8ad2-6796321e6306-kube-api-access-29sch\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.963956 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.968634 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.968653 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:34 crc kubenswrapper[4900]: I0127 13:06:34.981840 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29sch\" (UniqueName: \"kubernetes.io/projected/995aa162-c9a7-47ef-8ad2-6796321e6306-kube-api-access-29sch\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-wf5hw\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:35 crc kubenswrapper[4900]: I0127 13:06:35.001974 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:35 crc kubenswrapper[4900]: I0127 13:06:35.658602 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw"] Jan 27 13:06:36 crc kubenswrapper[4900]: I0127 13:06:36.602714 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" event={"ID":"995aa162-c9a7-47ef-8ad2-6796321e6306","Type":"ContainerStarted","Data":"3f7633ca8efeaacd0510f4f2d2ce1b356c7f361b09f9362503430012ebed324a"} Jan 27 13:06:36 crc kubenswrapper[4900]: I0127 13:06:36.603111 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" event={"ID":"995aa162-c9a7-47ef-8ad2-6796321e6306","Type":"ContainerStarted","Data":"1eafea21684e177d40cd7cfab9b983cc784a0df243540d163c21ea46ac2b55d8"} Jan 27 13:06:36 crc kubenswrapper[4900]: I0127 13:06:36.621802 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" podStartSLOduration=2.070078832 podStartE2EDuration="2.621776112s" podCreationTimestamp="2026-01-27 13:06:34 +0000 UTC" firstStartedPulling="2026-01-27 13:06:35.654848531 +0000 UTC m=+2422.891876741" lastFinishedPulling="2026-01-27 13:06:36.206545811 +0000 UTC m=+2423.443574021" observedRunningTime="2026-01-27 13:06:36.617077445 +0000 UTC m=+2423.854105655" watchObservedRunningTime="2026-01-27 13:06:36.621776112 +0000 UTC m=+2423.858804322" Jan 27 13:06:44 crc kubenswrapper[4900]: I0127 13:06:44.482264 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:06:44 crc kubenswrapper[4900]: E0127 13:06:44.483147 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:06:44 crc kubenswrapper[4900]: I0127 13:06:44.696382 4900 generic.go:334] "Generic (PLEG): container finished" podID="995aa162-c9a7-47ef-8ad2-6796321e6306" containerID="3f7633ca8efeaacd0510f4f2d2ce1b356c7f361b09f9362503430012ebed324a" exitCode=0 Jan 27 13:06:44 crc kubenswrapper[4900]: I0127 13:06:44.696434 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" event={"ID":"995aa162-c9a7-47ef-8ad2-6796321e6306","Type":"ContainerDied","Data":"3f7633ca8efeaacd0510f4f2d2ce1b356c7f361b09f9362503430012ebed324a"} Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.264165 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.399738 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29sch\" (UniqueName: \"kubernetes.io/projected/995aa162-c9a7-47ef-8ad2-6796321e6306-kube-api-access-29sch\") pod \"995aa162-c9a7-47ef-8ad2-6796321e6306\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.399872 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-inventory\") pod \"995aa162-c9a7-47ef-8ad2-6796321e6306\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.400001 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam\") pod \"995aa162-c9a7-47ef-8ad2-6796321e6306\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.405571 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/995aa162-c9a7-47ef-8ad2-6796321e6306-kube-api-access-29sch" (OuterVolumeSpecName: "kube-api-access-29sch") pod "995aa162-c9a7-47ef-8ad2-6796321e6306" (UID: "995aa162-c9a7-47ef-8ad2-6796321e6306"). InnerVolumeSpecName "kube-api-access-29sch". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:06:46 crc kubenswrapper[4900]: E0127 13:06:46.430659 4900 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam podName:995aa162-c9a7-47ef-8ad2-6796321e6306 nodeName:}" failed. No retries permitted until 2026-01-27 13:06:46.930628202 +0000 UTC m=+2434.167656412 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "ssh-key-openstack-edpm-ipam" (UniqueName: "kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam") pod "995aa162-c9a7-47ef-8ad2-6796321e6306" (UID: "995aa162-c9a7-47ef-8ad2-6796321e6306") : error deleting /var/lib/kubelet/pods/995aa162-c9a7-47ef-8ad2-6796321e6306/volume-subpaths: remove /var/lib/kubelet/pods/995aa162-c9a7-47ef-8ad2-6796321e6306/volume-subpaths: no such file or directory Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.434520 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-inventory" (OuterVolumeSpecName: "inventory") pod "995aa162-c9a7-47ef-8ad2-6796321e6306" (UID: "995aa162-c9a7-47ef-8ad2-6796321e6306"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.504813 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.504858 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29sch\" (UniqueName: \"kubernetes.io/projected/995aa162-c9a7-47ef-8ad2-6796321e6306-kube-api-access-29sch\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.724623 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" event={"ID":"995aa162-c9a7-47ef-8ad2-6796321e6306","Type":"ContainerDied","Data":"1eafea21684e177d40cd7cfab9b983cc784a0df243540d163c21ea46ac2b55d8"} Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.725090 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1eafea21684e177d40cd7cfab9b983cc784a0df243540d163c21ea46ac2b55d8" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.724711 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-wf5hw" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.857651 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv"] Jan 27 13:06:46 crc kubenswrapper[4900]: E0127 13:06:46.876635 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="995aa162-c9a7-47ef-8ad2-6796321e6306" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.876678 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="995aa162-c9a7-47ef-8ad2-6796321e6306" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.876944 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="995aa162-c9a7-47ef-8ad2-6796321e6306" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.877919 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.891094 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv"] Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.923504 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.923727 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcndj\" (UniqueName: \"kubernetes.io/projected/07b0138c-ca14-45fe-99d6-9474434a1eef-kube-api-access-kcndj\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:46 crc kubenswrapper[4900]: I0127 13:06:46.923926 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.030654 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam\") pod \"995aa162-c9a7-47ef-8ad2-6796321e6306\" (UID: \"995aa162-c9a7-47ef-8ad2-6796321e6306\") " Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.031943 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.034952 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.035135 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcndj\" (UniqueName: \"kubernetes.io/projected/07b0138c-ca14-45fe-99d6-9474434a1eef-kube-api-access-kcndj\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.038012 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.039045 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.044249 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "995aa162-c9a7-47ef-8ad2-6796321e6306" (UID: "995aa162-c9a7-47ef-8ad2-6796321e6306"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.067168 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcndj\" (UniqueName: \"kubernetes.io/projected/07b0138c-ca14-45fe-99d6-9474434a1eef-kube-api-access-kcndj\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.139029 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/995aa162-c9a7-47ef-8ad2-6796321e6306-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.210007 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:06:47 crc kubenswrapper[4900]: I0127 13:06:47.750810 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv"] Jan 27 13:06:48 crc kubenswrapper[4900]: I0127 13:06:48.753269 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" event={"ID":"07b0138c-ca14-45fe-99d6-9474434a1eef","Type":"ContainerStarted","Data":"caaf4dfd03b97587f58061d45f89964d88137a8fe67ed94faea00f643dc19c05"} Jan 27 13:06:48 crc kubenswrapper[4900]: I0127 13:06:48.754305 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" event={"ID":"07b0138c-ca14-45fe-99d6-9474434a1eef","Type":"ContainerStarted","Data":"90352d5f490e5faff76b8e35111e95376129afb6d0e24bc50489a0438b16720f"} Jan 27 13:06:48 crc kubenswrapper[4900]: I0127 13:06:48.783779 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" podStartSLOduration=2.376661785 podStartE2EDuration="2.783754739s" podCreationTimestamp="2026-01-27 13:06:46 +0000 UTC" firstStartedPulling="2026-01-27 13:06:47.76493862 +0000 UTC m=+2435.001966820" lastFinishedPulling="2026-01-27 13:06:48.172031564 +0000 UTC m=+2435.409059774" observedRunningTime="2026-01-27 13:06:48.77555328 +0000 UTC m=+2436.012581500" watchObservedRunningTime="2026-01-27 13:06:48.783754739 +0000 UTC m=+2436.020782969" Jan 27 13:06:53 crc kubenswrapper[4900]: I0127 13:06:53.318690 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-z6j69"] Jan 27 13:06:53 crc kubenswrapper[4900]: I0127 13:06:53.330096 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-z6j69"] Jan 27 13:06:54 crc kubenswrapper[4900]: I0127 13:06:54.503523 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01b46bc7-748f-4872-abb3-a7faef291c0b" path="/var/lib/kubelet/pods/01b46bc7-748f-4872-abb3-a7faef291c0b/volumes" Jan 27 13:06:55 crc kubenswrapper[4900]: I0127 13:06:55.482765 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:06:55 crc kubenswrapper[4900]: E0127 13:06:55.483550 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:06:58 crc kubenswrapper[4900]: I0127 13:06:58.870700 4900 generic.go:334] "Generic (PLEG): container finished" podID="07b0138c-ca14-45fe-99d6-9474434a1eef" containerID="caaf4dfd03b97587f58061d45f89964d88137a8fe67ed94faea00f643dc19c05" exitCode=0 Jan 27 13:06:58 crc kubenswrapper[4900]: I0127 13:06:58.870761 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" event={"ID":"07b0138c-ca14-45fe-99d6-9474434a1eef","Type":"ContainerDied","Data":"caaf4dfd03b97587f58061d45f89964d88137a8fe67ed94faea00f643dc19c05"} Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.394417 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.402492 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcndj\" (UniqueName: \"kubernetes.io/projected/07b0138c-ca14-45fe-99d6-9474434a1eef-kube-api-access-kcndj\") pod \"07b0138c-ca14-45fe-99d6-9474434a1eef\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.402621 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-ssh-key-openstack-edpm-ipam\") pod \"07b0138c-ca14-45fe-99d6-9474434a1eef\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.402650 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-inventory\") pod \"07b0138c-ca14-45fe-99d6-9474434a1eef\" (UID: \"07b0138c-ca14-45fe-99d6-9474434a1eef\") " Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.409163 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07b0138c-ca14-45fe-99d6-9474434a1eef-kube-api-access-kcndj" (OuterVolumeSpecName: "kube-api-access-kcndj") pod "07b0138c-ca14-45fe-99d6-9474434a1eef" (UID: "07b0138c-ca14-45fe-99d6-9474434a1eef"). InnerVolumeSpecName "kube-api-access-kcndj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.443976 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "07b0138c-ca14-45fe-99d6-9474434a1eef" (UID: "07b0138c-ca14-45fe-99d6-9474434a1eef"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.446570 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-inventory" (OuterVolumeSpecName: "inventory") pod "07b0138c-ca14-45fe-99d6-9474434a1eef" (UID: "07b0138c-ca14-45fe-99d6-9474434a1eef"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.507445 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcndj\" (UniqueName: \"kubernetes.io/projected/07b0138c-ca14-45fe-99d6-9474434a1eef-kube-api-access-kcndj\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.507483 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.507495 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07b0138c-ca14-45fe-99d6-9474434a1eef-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.894301 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" event={"ID":"07b0138c-ca14-45fe-99d6-9474434a1eef","Type":"ContainerDied","Data":"90352d5f490e5faff76b8e35111e95376129afb6d0e24bc50489a0438b16720f"} Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.894621 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90352d5f490e5faff76b8e35111e95376129afb6d0e24bc50489a0438b16720f" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.894327 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.993552 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh"] Jan 27 13:07:00 crc kubenswrapper[4900]: E0127 13:07:00.994197 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07b0138c-ca14-45fe-99d6-9474434a1eef" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.994217 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="07b0138c-ca14-45fe-99d6-9474434a1eef" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.994476 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="07b0138c-ca14-45fe-99d6-9474434a1eef" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 27 13:07:00 crc kubenswrapper[4900]: I0127 13:07:00.995602 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.004627 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.005313 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.005491 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.005773 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.005975 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.006145 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.006302 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.006618 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.006743 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.011928 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh"] Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.019954 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfvp9\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-kube-api-access-cfvp9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.020037 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.020129 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.020203 4900 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.020250 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.020298 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.021430 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.021496 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.021734 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.021960 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.022074 4900 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.022422 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.022636 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.022837 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.022874 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.022971 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125286 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125385 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125456 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125522 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125577 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125593 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125625 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125685 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfvp9\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-kube-api-access-cfvp9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125774 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 
13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125798 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125828 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125855 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125886 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125914 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125932 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.125956 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.131477 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.132470 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.133653 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.133718 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.133978 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.134047 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.134468 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.135732 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.136116 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.136267 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.136388 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.137136 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.139196 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.140571 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.144430 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 
13:07:01.146350 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfvp9\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-kube-api-access-cfvp9\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:01 crc kubenswrapper[4900]: I0127 13:07:01.583947 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:02 crc kubenswrapper[4900]: I0127 13:07:02.188627 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh"] Jan 27 13:07:02 crc kubenswrapper[4900]: I0127 13:07:02.927138 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" event={"ID":"8b0ea52c-09c3-4bf6-9c72-ae576a224b72","Type":"ContainerStarted","Data":"84a7f23945fdab6c65f0b2f39523c07e15231a9ce5c2b230bd712b34c5461aee"} Jan 27 13:07:03 crc kubenswrapper[4900]: I0127 13:07:03.943714 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" event={"ID":"8b0ea52c-09c3-4bf6-9c72-ae576a224b72","Type":"ContainerStarted","Data":"bd8b70fbb10fcfa3fd62b069c2505c071c4c10ce1d2d39721aff2746f6f39f26"} Jan 27 13:07:03 crc kubenswrapper[4900]: I0127 13:07:03.972446 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" podStartSLOduration=3.38593007 podStartE2EDuration="3.972399231s" podCreationTimestamp="2026-01-27 13:07:00 +0000 UTC" firstStartedPulling="2026-01-27 13:07:02.191364187 +0000 UTC m=+2449.428392397" lastFinishedPulling="2026-01-27 13:07:02.777833348 +0000 UTC m=+2450.014861558" observedRunningTime="2026-01-27 13:07:03.966714025 +0000 UTC m=+2451.203742255" watchObservedRunningTime="2026-01-27 13:07:03.972399231 +0000 UTC m=+2451.209427471" Jan 27 13:07:09 crc kubenswrapper[4900]: I0127 13:07:09.482851 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:07:09 crc kubenswrapper[4900]: E0127 13:07:09.483715 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:07:21 crc kubenswrapper[4900]: I0127 13:07:21.484008 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:07:21 crc kubenswrapper[4900]: E0127 13:07:21.487493 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:07:33 crc kubenswrapper[4900]: I0127 13:07:33.482416 4900 
scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:07:33 crc kubenswrapper[4900]: E0127 13:07:33.483380 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:07:37 crc kubenswrapper[4900]: I0127 13:07:37.050743 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-g6snh"] Jan 27 13:07:37 crc kubenswrapper[4900]: I0127 13:07:37.062350 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-g6snh"] Jan 27 13:07:38 crc kubenswrapper[4900]: I0127 13:07:38.495157 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b" path="/var/lib/kubelet/pods/a0f9ee9b-f180-4cde-938a-1fd3a8cbc31b/volumes" Jan 27 13:07:40 crc kubenswrapper[4900]: I0127 13:07:40.259956 4900 scope.go:117] "RemoveContainer" containerID="54dc1e99596bff522866900a49ba07dbffa3d9d02aa9300147907b1c31d64bec" Jan 27 13:07:40 crc kubenswrapper[4900]: I0127 13:07:40.306438 4900 scope.go:117] "RemoveContainer" containerID="2650982d4f0073af8e6f563035a4cd99683b2d48615435fd802cea1d651a7826" Jan 27 13:07:46 crc kubenswrapper[4900]: I0127 13:07:46.493948 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:07:46 crc kubenswrapper[4900]: E0127 13:07:46.495141 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:07:50 crc kubenswrapper[4900]: I0127 13:07:50.616703 4900 generic.go:334] "Generic (PLEG): container finished" podID="8b0ea52c-09c3-4bf6-9c72-ae576a224b72" containerID="bd8b70fbb10fcfa3fd62b069c2505c071c4c10ce1d2d39721aff2746f6f39f26" exitCode=0 Jan 27 13:07:50 crc kubenswrapper[4900]: I0127 13:07:50.616813 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" event={"ID":"8b0ea52c-09c3-4bf6-9c72-ae576a224b72","Type":"ContainerDied","Data":"bd8b70fbb10fcfa3fd62b069c2505c071c4c10ce1d2d39721aff2746f6f39f26"} Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.150830 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179437 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfvp9\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-kube-api-access-cfvp9\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179518 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179561 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ovn-combined-ca-bundle\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179600 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-combined-ca-bundle\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179750 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-power-monitoring-combined-ca-bundle\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179780 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179817 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-inventory\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179848 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-neutron-metadata-combined-ca-bundle\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179892 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod 
\"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.179975 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-repo-setup-combined-ca-bundle\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.181015 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-nova-combined-ca-bundle\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.181097 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ssh-key-openstack-edpm-ipam\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.181156 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.181245 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-libvirt-combined-ca-bundle\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.181275 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-ovn-default-certs-0\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.181294 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-bootstrap-combined-ca-bundle\") pod \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\" (UID: \"8b0ea52c-09c3-4bf6-9c72-ae576a224b72\") " Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.187716 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "nova-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.189452 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.191347 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.191545 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.191553 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.191639 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.191929 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-kube-api-access-cfvp9" (OuterVolumeSpecName: "kube-api-access-cfvp9") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "kube-api-access-cfvp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.192667 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.194727 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.201354 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.201533 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.204452 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.206282 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.209684 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.234825 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.236201 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-inventory" (OuterVolumeSpecName: "inventory") pod "8b0ea52c-09c3-4bf6-9c72-ae576a224b72" (UID: "8b0ea52c-09c3-4bf6-9c72-ae576a224b72"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.286491 4900 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.286887 4900 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.287020 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.287183 4900 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.287307 4900 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.287433 4900 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.287574 4900 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.287693 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfvp9\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-kube-api-access-cfvp9\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.287849 4900 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.287987 4900 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.288233 4900 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.288408 4900 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.288574 4900 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.288701 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.288852 4900 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.289034 4900 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/8b0ea52c-09c3-4bf6-9c72-ae576a224b72-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.644395 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" event={"ID":"8b0ea52c-09c3-4bf6-9c72-ae576a224b72","Type":"ContainerDied","Data":"84a7f23945fdab6c65f0b2f39523c07e15231a9ce5c2b230bd712b34c5461aee"} Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.644516 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84a7f23945fdab6c65f0b2f39523c07e15231a9ce5c2b230bd712b34c5461aee" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.644544 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.861645 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt"] Jan 27 13:07:52 crc kubenswrapper[4900]: E0127 13:07:52.862428 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b0ea52c-09c3-4bf6-9c72-ae576a224b72" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.862457 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b0ea52c-09c3-4bf6-9c72-ae576a224b72" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.862751 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b0ea52c-09c3-4bf6-9c72-ae576a224b72" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.863978 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.866489 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.866835 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.867074 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.867640 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.870967 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.897151 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt"] Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.905230 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.905557 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqt8q\" (UniqueName: \"kubernetes.io/projected/c14d3cd9-30a5-4852-aa71-915345161f76-kube-api-access-nqt8q\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.905719 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: 
\"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.905868 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c14d3cd9-30a5-4852-aa71-915345161f76-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:52 crc kubenswrapper[4900]: I0127 13:07:52.906018 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.009528 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.009839 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqt8q\" (UniqueName: \"kubernetes.io/projected/c14d3cd9-30a5-4852-aa71-915345161f76-kube-api-access-nqt8q\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.009911 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.009951 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c14d3cd9-30a5-4852-aa71-915345161f76-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.009978 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.011152 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c14d3cd9-30a5-4852-aa71-915345161f76-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.014208 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.015919 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.027137 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.031605 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqt8q\" (UniqueName: \"kubernetes.io/projected/c14d3cd9-30a5-4852-aa71-915345161f76-kube-api-access-nqt8q\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-vk2zt\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.186189 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:07:53 crc kubenswrapper[4900]: I0127 13:07:53.795344 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt"] Jan 27 13:07:54 crc kubenswrapper[4900]: I0127 13:07:54.680687 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" event={"ID":"c14d3cd9-30a5-4852-aa71-915345161f76","Type":"ContainerStarted","Data":"26ff6dc2c1ff7ceeaccde1ab693852dab980d3a223bfdbf224dcaebd44184bfb"} Jan 27 13:07:54 crc kubenswrapper[4900]: I0127 13:07:54.680950 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" event={"ID":"c14d3cd9-30a5-4852-aa71-915345161f76","Type":"ContainerStarted","Data":"5ad32d66701523ee768c10db56c543c4de705d27e6f075ec95c8cf7e1b63a91e"} Jan 27 13:07:54 crc kubenswrapper[4900]: I0127 13:07:54.723321 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" podStartSLOduration=2.282053698 podStartE2EDuration="2.723273144s" podCreationTimestamp="2026-01-27 13:07:52 +0000 UTC" firstStartedPulling="2026-01-27 13:07:53.799335574 +0000 UTC m=+2501.036363774" lastFinishedPulling="2026-01-27 13:07:54.24055501 +0000 UTC m=+2501.477583220" observedRunningTime="2026-01-27 13:07:54.702289183 +0000 UTC m=+2501.939317413" watchObservedRunningTime="2026-01-27 13:07:54.723273144 +0000 UTC m=+2501.960301364" Jan 27 13:07:59 crc kubenswrapper[4900]: I0127 13:07:59.484262 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:07:59 crc kubenswrapper[4900]: E0127 13:07:59.485349 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:08:14 crc kubenswrapper[4900]: I0127 13:08:14.482761 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:08:14 crc kubenswrapper[4900]: E0127 13:08:14.483522 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:08:26 crc kubenswrapper[4900]: I0127 13:08:26.491850 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:08:26 crc kubenswrapper[4900]: E0127 13:08:26.494282 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" 
podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:08:39 crc kubenswrapper[4900]: I0127 13:08:39.482625 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:08:39 crc kubenswrapper[4900]: E0127 13:08:39.483577 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:08:53 crc kubenswrapper[4900]: I0127 13:08:53.482306 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:08:53 crc kubenswrapper[4900]: E0127 13:08:53.483359 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:09:01 crc kubenswrapper[4900]: I0127 13:09:01.574360 4900 generic.go:334] "Generic (PLEG): container finished" podID="c14d3cd9-30a5-4852-aa71-915345161f76" containerID="26ff6dc2c1ff7ceeaccde1ab693852dab980d3a223bfdbf224dcaebd44184bfb" exitCode=0 Jan 27 13:09:01 crc kubenswrapper[4900]: I0127 13:09:01.574413 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" event={"ID":"c14d3cd9-30a5-4852-aa71-915345161f76","Type":"ContainerDied","Data":"26ff6dc2c1ff7ceeaccde1ab693852dab980d3a223bfdbf224dcaebd44184bfb"} Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.310411 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.351549 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ssh-key-openstack-edpm-ipam\") pod \"c14d3cd9-30a5-4852-aa71-915345161f76\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.351935 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c14d3cd9-30a5-4852-aa71-915345161f76-ovncontroller-config-0\") pod \"c14d3cd9-30a5-4852-aa71-915345161f76\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.352077 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-inventory\") pod \"c14d3cd9-30a5-4852-aa71-915345161f76\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.352133 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ovn-combined-ca-bundle\") pod \"c14d3cd9-30a5-4852-aa71-915345161f76\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.352194 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqt8q\" (UniqueName: \"kubernetes.io/projected/c14d3cd9-30a5-4852-aa71-915345161f76-kube-api-access-nqt8q\") pod \"c14d3cd9-30a5-4852-aa71-915345161f76\" (UID: \"c14d3cd9-30a5-4852-aa71-915345161f76\") " Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.362880 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c14d3cd9-30a5-4852-aa71-915345161f76-kube-api-access-nqt8q" (OuterVolumeSpecName: "kube-api-access-nqt8q") pod "c14d3cd9-30a5-4852-aa71-915345161f76" (UID: "c14d3cd9-30a5-4852-aa71-915345161f76"). InnerVolumeSpecName "kube-api-access-nqt8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.365072 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "c14d3cd9-30a5-4852-aa71-915345161f76" (UID: "c14d3cd9-30a5-4852-aa71-915345161f76"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.391528 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "c14d3cd9-30a5-4852-aa71-915345161f76" (UID: "c14d3cd9-30a5-4852-aa71-915345161f76"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.410492 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c14d3cd9-30a5-4852-aa71-915345161f76-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "c14d3cd9-30a5-4852-aa71-915345161f76" (UID: "c14d3cd9-30a5-4852-aa71-915345161f76"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.414174 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-inventory" (OuterVolumeSpecName: "inventory") pod "c14d3cd9-30a5-4852-aa71-915345161f76" (UID: "c14d3cd9-30a5-4852-aa71-915345161f76"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.455276 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.455350 4900 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/c14d3cd9-30a5-4852-aa71-915345161f76-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.455361 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.455376 4900 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c14d3cd9-30a5-4852-aa71-915345161f76-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.455386 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqt8q\" (UniqueName: \"kubernetes.io/projected/c14d3cd9-30a5-4852-aa71-915345161f76-kube-api-access-nqt8q\") on node \"crc\" DevicePath \"\"" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.605095 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" event={"ID":"c14d3cd9-30a5-4852-aa71-915345161f76","Type":"ContainerDied","Data":"5ad32d66701523ee768c10db56c543c4de705d27e6f075ec95c8cf7e1b63a91e"} Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.605158 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ad32d66701523ee768c10db56c543c4de705d27e6f075ec95c8cf7e1b63a91e" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.605165 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-vk2zt" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.735863 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl"] Jan 27 13:09:03 crc kubenswrapper[4900]: E0127 13:09:03.736866 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c14d3cd9-30a5-4852-aa71-915345161f76" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.736900 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="c14d3cd9-30a5-4852-aa71-915345161f76" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.737421 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="c14d3cd9-30a5-4852-aa71-915345161f76" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.739235 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.746457 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.746528 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.746623 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.746471 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.746738 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.746782 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.768866 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl"] Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.865883 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.866157 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7m9z\" (UniqueName: \"kubernetes.io/projected/299b7b55-c1ed-48af-9570-91937876ab32-kube-api-access-f7m9z\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.866233 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.866459 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.866538 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.866591 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.968957 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.969090 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.969146 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.969191 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-ssh-key-openstack-edpm-ipam\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.969346 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7m9z\" (UniqueName: \"kubernetes.io/projected/299b7b55-c1ed-48af-9570-91937876ab32-kube-api-access-f7m9z\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.969413 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.974598 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.974615 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.975805 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.976791 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.986448 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:03 crc kubenswrapper[4900]: I0127 13:09:03.993006 4900 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7m9z\" (UniqueName: \"kubernetes.io/projected/299b7b55-c1ed-48af-9570-91937876ab32-kube-api-access-f7m9z\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:04 crc kubenswrapper[4900]: I0127 13:09:04.061352 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:09:04 crc kubenswrapper[4900]: I0127 13:09:04.703539 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl"] Jan 27 13:09:04 crc kubenswrapper[4900]: W0127 13:09:04.712029 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod299b7b55_c1ed_48af_9570_91937876ab32.slice/crio-6f35bf456c16e6d80dbab120ecfd2257148aef36498ca8de6a394cdb8ee887e7 WatchSource:0}: Error finding container 6f35bf456c16e6d80dbab120ecfd2257148aef36498ca8de6a394cdb8ee887e7: Status 404 returned error can't find the container with id 6f35bf456c16e6d80dbab120ecfd2257148aef36498ca8de6a394cdb8ee887e7 Jan 27 13:09:05 crc kubenswrapper[4900]: I0127 13:09:05.648696 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" event={"ID":"299b7b55-c1ed-48af-9570-91937876ab32","Type":"ContainerStarted","Data":"f216690a6b67c671695b20f624b53aa664eb54dc193b5515a33c40ef367d9740"} Jan 27 13:09:05 crc kubenswrapper[4900]: I0127 13:09:05.649306 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" event={"ID":"299b7b55-c1ed-48af-9570-91937876ab32","Type":"ContainerStarted","Data":"6f35bf456c16e6d80dbab120ecfd2257148aef36498ca8de6a394cdb8ee887e7"} Jan 27 13:09:05 crc kubenswrapper[4900]: I0127 13:09:05.684001 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" podStartSLOduration=2.221260221 podStartE2EDuration="2.683977713s" podCreationTimestamp="2026-01-27 13:09:03 +0000 UTC" firstStartedPulling="2026-01-27 13:09:04.715697183 +0000 UTC m=+2571.952725393" lastFinishedPulling="2026-01-27 13:09:05.178414665 +0000 UTC m=+2572.415442885" observedRunningTime="2026-01-27 13:09:05.671761357 +0000 UTC m=+2572.908789567" watchObservedRunningTime="2026-01-27 13:09:05.683977713 +0000 UTC m=+2572.921005943" Jan 27 13:09:07 crc kubenswrapper[4900]: I0127 13:09:07.483301 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:09:07 crc kubenswrapper[4900]: E0127 13:09:07.484009 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:09:22 crc kubenswrapper[4900]: I0127 13:09:22.483689 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:09:22 crc 
kubenswrapper[4900]: I0127 13:09:22.902688 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"1f022fb6295bafc9407689be7f800205befa38981584e07b6408229b26335d7d"} Jan 27 13:09:30 crc kubenswrapper[4900]: I0127 13:09:30.824894 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gzq5p"] Jan 27 13:09:30 crc kubenswrapper[4900]: I0127 13:09:30.829661 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:30 crc kubenswrapper[4900]: I0127 13:09:30.842093 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gzq5p"] Jan 27 13:09:30 crc kubenswrapper[4900]: I0127 13:09:30.931588 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wnnt\" (UniqueName: \"kubernetes.io/projected/9d17e8de-7313-497d-bbad-d596a4668e57-kube-api-access-6wnnt\") pod \"redhat-operators-gzq5p\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:30 crc kubenswrapper[4900]: I0127 13:09:30.931750 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-catalog-content\") pod \"redhat-operators-gzq5p\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:30 crc kubenswrapper[4900]: I0127 13:09:30.932358 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-utilities\") pod \"redhat-operators-gzq5p\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:31 crc kubenswrapper[4900]: I0127 13:09:31.034844 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-utilities\") pod \"redhat-operators-gzq5p\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:31 crc kubenswrapper[4900]: I0127 13:09:31.034975 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wnnt\" (UniqueName: \"kubernetes.io/projected/9d17e8de-7313-497d-bbad-d596a4668e57-kube-api-access-6wnnt\") pod \"redhat-operators-gzq5p\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:31 crc kubenswrapper[4900]: I0127 13:09:31.035019 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-catalog-content\") pod \"redhat-operators-gzq5p\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:31 crc kubenswrapper[4900]: I0127 13:09:31.035715 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-catalog-content\") pod \"redhat-operators-gzq5p\" (UID: 
\"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:31 crc kubenswrapper[4900]: I0127 13:09:31.035901 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-utilities\") pod \"redhat-operators-gzq5p\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:31 crc kubenswrapper[4900]: I0127 13:09:31.057902 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wnnt\" (UniqueName: \"kubernetes.io/projected/9d17e8de-7313-497d-bbad-d596a4668e57-kube-api-access-6wnnt\") pod \"redhat-operators-gzq5p\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:31 crc kubenswrapper[4900]: I0127 13:09:31.159509 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:31 crc kubenswrapper[4900]: I0127 13:09:31.900346 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gzq5p"] Jan 27 13:09:32 crc kubenswrapper[4900]: I0127 13:09:32.042531 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzq5p" event={"ID":"9d17e8de-7313-497d-bbad-d596a4668e57","Type":"ContainerStarted","Data":"2028ba1a1428403076c4f800a5ed2cca8d7d530aa50d37ae86669100d64c181b"} Jan 27 13:09:33 crc kubenswrapper[4900]: I0127 13:09:33.060464 4900 generic.go:334] "Generic (PLEG): container finished" podID="9d17e8de-7313-497d-bbad-d596a4668e57" containerID="85343095c9b0a31fd607bd18b84b0887d7589d2028a2a6f9be37536f357b5300" exitCode=0 Jan 27 13:09:33 crc kubenswrapper[4900]: I0127 13:09:33.060581 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzq5p" event={"ID":"9d17e8de-7313-497d-bbad-d596a4668e57","Type":"ContainerDied","Data":"85343095c9b0a31fd607bd18b84b0887d7589d2028a2a6f9be37536f357b5300"} Jan 27 13:09:34 crc kubenswrapper[4900]: I0127 13:09:34.080356 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzq5p" event={"ID":"9d17e8de-7313-497d-bbad-d596a4668e57","Type":"ContainerStarted","Data":"14d672124e294852d3c82f5c9ea34d124e0ba2fd0b9a1d8090e069a66efdbb1a"} Jan 27 13:09:42 crc kubenswrapper[4900]: I0127 13:09:42.183463 4900 generic.go:334] "Generic (PLEG): container finished" podID="9d17e8de-7313-497d-bbad-d596a4668e57" containerID="14d672124e294852d3c82f5c9ea34d124e0ba2fd0b9a1d8090e069a66efdbb1a" exitCode=0 Jan 27 13:09:42 crc kubenswrapper[4900]: I0127 13:09:42.183566 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzq5p" event={"ID":"9d17e8de-7313-497d-bbad-d596a4668e57","Type":"ContainerDied","Data":"14d672124e294852d3c82f5c9ea34d124e0ba2fd0b9a1d8090e069a66efdbb1a"} Jan 27 13:09:42 crc kubenswrapper[4900]: I0127 13:09:42.188367 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 13:09:43 crc kubenswrapper[4900]: I0127 13:09:43.202441 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzq5p" event={"ID":"9d17e8de-7313-497d-bbad-d596a4668e57","Type":"ContainerStarted","Data":"c8f812bba499c7ee32a980dae9f50a2257aa66be157d85dcf41cdab9e5316beb"} Jan 27 13:09:43 crc kubenswrapper[4900]: I0127 
13:09:43.274803 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gzq5p" podStartSLOduration=3.7380895179999998 podStartE2EDuration="13.27476902s" podCreationTimestamp="2026-01-27 13:09:30 +0000 UTC" firstStartedPulling="2026-01-27 13:09:33.063790471 +0000 UTC m=+2600.300818681" lastFinishedPulling="2026-01-27 13:09:42.600469953 +0000 UTC m=+2609.837498183" observedRunningTime="2026-01-27 13:09:43.226523817 +0000 UTC m=+2610.463552037" watchObservedRunningTime="2026-01-27 13:09:43.27476902 +0000 UTC m=+2610.511797240" Jan 27 13:09:51 crc kubenswrapper[4900]: I0127 13:09:51.162376 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:51 crc kubenswrapper[4900]: I0127 13:09:51.162939 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:09:52 crc kubenswrapper[4900]: I0127 13:09:52.211586 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gzq5p" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="registry-server" probeResult="failure" output=< Jan 27 13:09:52 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:09:52 crc kubenswrapper[4900]: > Jan 27 13:10:00 crc kubenswrapper[4900]: I0127 13:10:00.480849 4900 generic.go:334] "Generic (PLEG): container finished" podID="299b7b55-c1ed-48af-9570-91937876ab32" containerID="f216690a6b67c671695b20f624b53aa664eb54dc193b5515a33c40ef367d9740" exitCode=0 Jan 27 13:10:00 crc kubenswrapper[4900]: I0127 13:10:00.501419 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" event={"ID":"299b7b55-c1ed-48af-9570-91937876ab32","Type":"ContainerDied","Data":"f216690a6b67c671695b20f624b53aa664eb54dc193b5515a33c40ef367d9740"} Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.038811 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.174121 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-nova-metadata-neutron-config-0\") pod \"299b7b55-c1ed-48af-9570-91937876ab32\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.174345 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-ssh-key-openstack-edpm-ipam\") pod \"299b7b55-c1ed-48af-9570-91937876ab32\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.174415 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-metadata-combined-ca-bundle\") pod \"299b7b55-c1ed-48af-9570-91937876ab32\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.174502 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-inventory\") pod \"299b7b55-c1ed-48af-9570-91937876ab32\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.174571 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7m9z\" (UniqueName: \"kubernetes.io/projected/299b7b55-c1ed-48af-9570-91937876ab32-kube-api-access-f7m9z\") pod \"299b7b55-c1ed-48af-9570-91937876ab32\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.174683 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-ovn-metadata-agent-neutron-config-0\") pod \"299b7b55-c1ed-48af-9570-91937876ab32\" (UID: \"299b7b55-c1ed-48af-9570-91937876ab32\") " Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.182544 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "299b7b55-c1ed-48af-9570-91937876ab32" (UID: "299b7b55-c1ed-48af-9570-91937876ab32"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.184460 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/299b7b55-c1ed-48af-9570-91937876ab32-kube-api-access-f7m9z" (OuterVolumeSpecName: "kube-api-access-f7m9z") pod "299b7b55-c1ed-48af-9570-91937876ab32" (UID: "299b7b55-c1ed-48af-9570-91937876ab32"). InnerVolumeSpecName "kube-api-access-f7m9z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.214786 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "299b7b55-c1ed-48af-9570-91937876ab32" (UID: "299b7b55-c1ed-48af-9570-91937876ab32"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.216025 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gzq5p" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="registry-server" probeResult="failure" output=< Jan 27 13:10:02 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:10:02 crc kubenswrapper[4900]: > Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.218202 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-inventory" (OuterVolumeSpecName: "inventory") pod "299b7b55-c1ed-48af-9570-91937876ab32" (UID: "299b7b55-c1ed-48af-9570-91937876ab32"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.232431 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "299b7b55-c1ed-48af-9570-91937876ab32" (UID: "299b7b55-c1ed-48af-9570-91937876ab32"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.235700 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "299b7b55-c1ed-48af-9570-91937876ab32" (UID: "299b7b55-c1ed-48af-9570-91937876ab32"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.278747 4900 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.278805 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.278822 4900 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.278840 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.278855 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7m9z\" (UniqueName: \"kubernetes.io/projected/299b7b55-c1ed-48af-9570-91937876ab32-kube-api-access-f7m9z\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.278869 4900 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/299b7b55-c1ed-48af-9570-91937876ab32-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.522250 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" event={"ID":"299b7b55-c1ed-48af-9570-91937876ab32","Type":"ContainerDied","Data":"6f35bf456c16e6d80dbab120ecfd2257148aef36498ca8de6a394cdb8ee887e7"} Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.522744 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f35bf456c16e6d80dbab120ecfd2257148aef36498ca8de6a394cdb8ee887e7" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.522901 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.711171 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc"] Jan 27 13:10:02 crc kubenswrapper[4900]: E0127 13:10:02.711988 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="299b7b55-c1ed-48af-9570-91937876ab32" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.712010 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="299b7b55-c1ed-48af-9570-91937876ab32" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.712351 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="299b7b55-c1ed-48af-9570-91937876ab32" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.713598 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.719963 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.720608 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.724912 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc"] Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.730243 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.730338 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.730496 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.822780 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.822870 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46j7p\" (UniqueName: \"kubernetes.io/projected/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-kube-api-access-46j7p\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.823023 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: 
\"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.823177 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.823204 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.926273 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.926406 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46j7p\" (UniqueName: \"kubernetes.io/projected/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-kube-api-access-46j7p\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.926550 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.926724 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.926766 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.932898 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: 
\"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.933383 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.945740 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46j7p\" (UniqueName: \"kubernetes.io/projected/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-kube-api-access-46j7p\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.946968 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:02 crc kubenswrapper[4900]: I0127 13:10:02.951606 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:10:03 crc kubenswrapper[4900]: I0127 13:10:03.155017 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc"
Jan 27 13:10:03 crc kubenswrapper[4900]: I0127 13:10:03.901844 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc"]
Jan 27 13:10:04 crc kubenswrapper[4900]: I0127 13:10:04.600756 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" event={"ID":"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d","Type":"ContainerStarted","Data":"bc45f6cb1044110b594c5b775d6b52bf6f5c01d1beca8f1344b88c67d3002282"}
Jan 27 13:10:05 crc kubenswrapper[4900]: I0127 13:10:05.615278 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" event={"ID":"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d","Type":"ContainerStarted","Data":"93b2017506f31dec79bbb0b81fc8b863bee0494953ec9acff8575e5ac307c0d9"}
Jan 27 13:10:05 crc kubenswrapper[4900]: I0127 13:10:05.661112 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" podStartSLOduration=3.224302149 podStartE2EDuration="3.661079766s" podCreationTimestamp="2026-01-27 13:10:02 +0000 UTC" firstStartedPulling="2026-01-27 13:10:03.912746133 +0000 UTC m=+2631.149774343" lastFinishedPulling="2026-01-27 13:10:04.34952375 +0000 UTC m=+2631.586551960" observedRunningTime="2026-01-27 13:10:05.650802877 +0000 UTC m=+2632.887831087" watchObservedRunningTime="2026-01-27 13:10:05.661079766 +0000 UTC m=+2632.898107966"
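
The startup-latency entry above reports two figures: podStartE2EDuration matches watchObservedRunningTime minus podCreationTimestamp (13:10:05.661079766 - 13:10:02 = 3.661079766s), and podStartSLOduration additionally excludes the image-pull window (lastFinishedPulling - firstStartedPulling = 0.436777617s, leaving 3.224302149s). A minimal Go sketch of that arithmetic using the timestamps copied from the entry; illustrative only, not kubelet's pod_startup_latency_tracker code:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps copied from the "Observed pod startup duration" entry above.
        // Layout is Go's time.Time.String() form; fractional seconds are parsed
        // implicitly even though the layout omits them.
        const layout = "2006-01-02 15:04:05 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s)
            if err != nil {
                panic(err)
            }
            return t
        }
        created := parse("2026-01-27 13:10:02 +0000 UTC")
        firstPull := parse("2026-01-27 13:10:03.912746133 +0000 UTC")
        lastPull := parse("2026-01-27 13:10:04.34952375 +0000 UTC")
        running := parse("2026-01-27 13:10:05.661079766 +0000 UTC")

        e2e := running.Sub(created)          // podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration: pull window excluded
        fmt.Println(e2e, slo)                // 3.661079766s 3.224302149s
    }

The same relationship holds for the nova-edpm pod later in this log (3.616620316s - 0.517334009s = 3.099286307s).
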
containerID="2028ba1a1428403076c4f800a5ed2cca8d7d530aa50d37ae86669100d64c181b" Jan 27 13:10:13 crc kubenswrapper[4900]: I0127 13:10:13.848470 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:10:13 crc kubenswrapper[4900]: I0127 13:10:13.921836 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wnnt\" (UniqueName: \"kubernetes.io/projected/9d17e8de-7313-497d-bbad-d596a4668e57-kube-api-access-6wnnt\") pod \"9d17e8de-7313-497d-bbad-d596a4668e57\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " Jan 27 13:10:13 crc kubenswrapper[4900]: I0127 13:10:13.922272 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-catalog-content\") pod \"9d17e8de-7313-497d-bbad-d596a4668e57\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " Jan 27 13:10:13 crc kubenswrapper[4900]: I0127 13:10:13.922353 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-utilities\") pod \"9d17e8de-7313-497d-bbad-d596a4668e57\" (UID: \"9d17e8de-7313-497d-bbad-d596a4668e57\") " Jan 27 13:10:13 crc kubenswrapper[4900]: I0127 13:10:13.927694 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-utilities" (OuterVolumeSpecName: "utilities") pod "9d17e8de-7313-497d-bbad-d596a4668e57" (UID: "9d17e8de-7313-497d-bbad-d596a4668e57"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:10:13 crc kubenswrapper[4900]: I0127 13:10:13.945599 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d17e8de-7313-497d-bbad-d596a4668e57-kube-api-access-6wnnt" (OuterVolumeSpecName: "kube-api-access-6wnnt") pod "9d17e8de-7313-497d-bbad-d596a4668e57" (UID: "9d17e8de-7313-497d-bbad-d596a4668e57"). InnerVolumeSpecName "kube-api-access-6wnnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:10:14 crc kubenswrapper[4900]: I0127 13:10:14.028770 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wnnt\" (UniqueName: \"kubernetes.io/projected/9d17e8de-7313-497d-bbad-d596a4668e57-kube-api-access-6wnnt\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:14 crc kubenswrapper[4900]: I0127 13:10:14.028826 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:14 crc kubenswrapper[4900]: I0127 13:10:14.074340 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9d17e8de-7313-497d-bbad-d596a4668e57" (UID: "9d17e8de-7313-497d-bbad-d596a4668e57"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:10:14 crc kubenswrapper[4900]: I0127 13:10:14.139737 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d17e8de-7313-497d-bbad-d596a4668e57-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:10:14 crc kubenswrapper[4900]: I0127 13:10:14.743445 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gzq5p" Jan 27 13:10:14 crc kubenswrapper[4900]: I0127 13:10:14.786596 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gzq5p"] Jan 27 13:10:14 crc kubenswrapper[4900]: I0127 13:10:14.800402 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gzq5p"] Jan 27 13:10:16 crc kubenswrapper[4900]: I0127 13:10:16.517184 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" path="/var/lib/kubelet/pods/9d17e8de-7313-497d-bbad-d596a4668e57/volumes" Jan 27 13:11:22 crc kubenswrapper[4900]: I0127 13:11:22.372565 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:11:22 crc kubenswrapper[4900]: I0127 13:11:22.373565 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:11:52 crc kubenswrapper[4900]: I0127 13:11:52.373463 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:11:52 crc kubenswrapper[4900]: I0127 13:11:52.374253 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:12:22 crc kubenswrapper[4900]: I0127 13:12:22.372794 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:12:22 crc kubenswrapper[4900]: I0127 13:12:22.375118 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:12:22 crc kubenswrapper[4900]: I0127 13:12:22.375185 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:12:22 crc kubenswrapper[4900]: I0127 13:12:22.378818 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1f022fb6295bafc9407689be7f800205befa38981584e07b6408229b26335d7d"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:12:22 crc kubenswrapper[4900]: I0127 13:12:22.378894 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://1f022fb6295bafc9407689be7f800205befa38981584e07b6408229b26335d7d" gracePeriod=600 Jan 27 13:12:23 crc kubenswrapper[4900]: I0127 13:12:23.064607 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="1f022fb6295bafc9407689be7f800205befa38981584e07b6408229b26335d7d" exitCode=0 Jan 27 13:12:23 crc kubenswrapper[4900]: I0127 13:12:23.064624 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"1f022fb6295bafc9407689be7f800205befa38981584e07b6408229b26335d7d"} Jan 27 13:12:23 crc kubenswrapper[4900]: I0127 13:12:23.065568 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579"} Jan 27 13:12:23 crc kubenswrapper[4900]: I0127 13:12:23.065614 4900 scope.go:117] "RemoveContainer" containerID="f24276275dc03a2ac995a4d1827bc72c05bcaf725dfaf397b9ba24c98b87be4e" Jan 27 13:13:45 crc kubenswrapper[4900]: I0127 13:13:45.851275 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 13:13:45 crc kubenswrapper[4900]: I0127 13:13:45.852972 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 13:13:45 crc kubenswrapper[4900]: I0127 13:13:45.851345 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 13:13:45 crc kubenswrapper[4900]: I0127 13:13:45.853493 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get 
\"https://10.217.0.29:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 13:14:22 crc kubenswrapper[4900]: I0127 13:14:22.372259 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:14:22 crc kubenswrapper[4900]: I0127 13:14:22.372949 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:14:30 crc kubenswrapper[4900]: I0127 13:14:30.114344 4900 generic.go:334] "Generic (PLEG): container finished" podID="3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" containerID="93b2017506f31dec79bbb0b81fc8b863bee0494953ec9acff8575e5ac307c0d9" exitCode=0 Jan 27 13:14:30 crc kubenswrapper[4900]: I0127 13:14:30.114443 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" event={"ID":"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d","Type":"ContainerDied","Data":"93b2017506f31dec79bbb0b81fc8b863bee0494953ec9acff8575e5ac307c0d9"} Jan 27 13:14:31 crc kubenswrapper[4900]: I0127 13:14:31.884399 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.015772 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46j7p\" (UniqueName: \"kubernetes.io/projected/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-kube-api-access-46j7p\") pod \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.015821 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-inventory\") pod \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.016248 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-ssh-key-openstack-edpm-ipam\") pod \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.016422 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-combined-ca-bundle\") pod \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.016465 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-secret-0\") pod \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\" (UID: \"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d\") " Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.025177 4900 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" (UID: "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.026340 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-kube-api-access-46j7p" (OuterVolumeSpecName: "kube-api-access-46j7p") pod "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" (UID: "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d"). InnerVolumeSpecName "kube-api-access-46j7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.393855 4900 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.393907 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46j7p\" (UniqueName: \"kubernetes.io/projected/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-kube-api-access-46j7p\") on node \"crc\" DevicePath \"\"" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.424576 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" event={"ID":"3a7fbe74-19ba-4e60-9f23-2fb69c8e141d","Type":"ContainerDied","Data":"bc45f6cb1044110b594c5b775d6b52bf6f5c01d1beca8f1344b88c67d3002282"} Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.424684 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc45f6cb1044110b594c5b775d6b52bf6f5c01d1beca8f1344b88c67d3002282" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.425706 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.440228 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-inventory" (OuterVolumeSpecName: "inventory") pod "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" (UID: "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.462436 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" (UID: "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.476433 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" (UID: "3a7fbe74-19ba-4e60-9f23-2fb69c8e141d"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.497225 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.497261 4900 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.497283 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3a7fbe74-19ba-4e60-9f23-2fb69c8e141d-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.542845 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"] Jan 27 13:14:32 crc kubenswrapper[4900]: E0127 13:14:32.543960 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.543994 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 27 13:14:32 crc kubenswrapper[4900]: E0127 13:14:32.544010 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="extract-utilities" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.544021 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="extract-utilities" Jan 27 13:14:32 crc kubenswrapper[4900]: E0127 13:14:32.544040 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="extract-content" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.544050 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="extract-content" Jan 27 13:14:32 crc kubenswrapper[4900]: E0127 13:14:32.545181 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="registry-server" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.545205 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="registry-server" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.545747 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a7fbe74-19ba-4e60-9f23-2fb69c8e141d" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.545816 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d17e8de-7313-497d-bbad-d596a4668e57" containerName="registry-server" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.552304 4900 util.go:30] "No sandbox for pod can be found. 
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.552304 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.558687 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.559016 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.561031 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.596263 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"]
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.605627 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.605721 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.620525 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b9439553-d0f2-460f-b661-562e378a4b3c-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.620691 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bspj\" (UniqueName: \"kubernetes.io/projected/b9439553-d0f2-460f-b661-562e378a4b3c-kube-api-access-7bspj\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.620820 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.621303 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"
Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 
13:14:32.621651 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.621693 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.621767 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.726876 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.727002 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.727121 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.727155 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.727246 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.727522 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.728673 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.728800 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b9439553-d0f2-460f-b661-562e378a4b3c-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.728874 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bspj\" (UniqueName: \"kubernetes.io/projected/b9439553-d0f2-460f-b661-562e378a4b3c-kube-api-access-7bspj\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.730280 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b9439553-d0f2-460f-b661-562e378a4b3c-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.733962 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.734041 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.736009 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 
13:14:32.736164 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.737653 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.746129 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.751748 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.752369 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bspj\" (UniqueName: \"kubernetes.io/projected/b9439553-d0f2-460f-b661-562e378a4b3c-kube-api-access-7bspj\") pod \"nova-edpm-deployment-openstack-edpm-ipam-7xv88\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:32 crc kubenswrapper[4900]: I0127 13:14:32.965049 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:14:33 crc kubenswrapper[4900]: I0127 13:14:33.630623 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88"] Jan 27 13:14:34 crc kubenswrapper[4900]: I0127 13:14:34.458364 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" event={"ID":"b9439553-d0f2-460f-b661-562e378a4b3c","Type":"ContainerStarted","Data":"b26bfabf0b46474ff41a7ef1150139a3e6c663d32307ad7f26927e858eb6e94b"} Jan 27 13:14:35 crc kubenswrapper[4900]: I0127 13:14:35.568644 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" event={"ID":"b9439553-d0f2-460f-b661-562e378a4b3c","Type":"ContainerStarted","Data":"61740dd7950b0dbe729b68c138ad6662d666b8aec75a1b64a785ce84c973c18c"} Jan 27 13:14:35 crc kubenswrapper[4900]: I0127 13:14:35.616660 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" podStartSLOduration=3.099286307 podStartE2EDuration="3.616620316s" podCreationTimestamp="2026-01-27 13:14:32 +0000 UTC" firstStartedPulling="2026-01-27 13:14:33.630032512 +0000 UTC m=+2900.867060722" lastFinishedPulling="2026-01-27 13:14:34.147366521 +0000 UTC m=+2901.384394731" observedRunningTime="2026-01-27 13:14:35.594472961 +0000 UTC m=+2902.831501181" watchObservedRunningTime="2026-01-27 13:14:35.616620316 +0000 UTC m=+2902.853648526" Jan 27 13:14:52 crc kubenswrapper[4900]: I0127 13:14:52.372922 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:14:52 crc kubenswrapper[4900]: I0127 13:14:52.373552 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.560129 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f"] Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.563224 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.567634 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.569939 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.583673 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f"] Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.695007 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f8d606c-71a1-417f-b6f5-0e224785c9e5-secret-volume\") pod \"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.695294 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxxvx\" (UniqueName: \"kubernetes.io/projected/7f8d606c-71a1-417f-b6f5-0e224785c9e5-kube-api-access-pxxvx\") pod \"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.695444 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f8d606c-71a1-417f-b6f5-0e224785c9e5-config-volume\") pod \"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.798643 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxxvx\" (UniqueName: \"kubernetes.io/projected/7f8d606c-71a1-417f-b6f5-0e224785c9e5-kube-api-access-pxxvx\") pod \"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.798790 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f8d606c-71a1-417f-b6f5-0e224785c9e5-config-volume\") pod \"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.798901 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f8d606c-71a1-417f-b6f5-0e224785c9e5-secret-volume\") pod \"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.799782 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f8d606c-71a1-417f-b6f5-0e224785c9e5-config-volume\") pod 
\"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.814090 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f8d606c-71a1-417f-b6f5-0e224785c9e5-secret-volume\") pod \"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.818228 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxxvx\" (UniqueName: \"kubernetes.io/projected/7f8d606c-71a1-417f-b6f5-0e224785c9e5-kube-api-access-pxxvx\") pod \"collect-profiles-29491995-d8c7f\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:00 crc kubenswrapper[4900]: I0127 13:15:00.901088 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:01 crc kubenswrapper[4900]: I0127 13:15:01.690602 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f"] Jan 27 13:15:01 crc kubenswrapper[4900]: I0127 13:15:01.899268 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" event={"ID":"7f8d606c-71a1-417f-b6f5-0e224785c9e5","Type":"ContainerStarted","Data":"b775cad854ed19c5e4bb22e43863fd474d89f6def2eb9ad175ed69d39c40a9f8"} Jan 27 13:15:02 crc kubenswrapper[4900]: I0127 13:15:02.916828 4900 generic.go:334] "Generic (PLEG): container finished" podID="7f8d606c-71a1-417f-b6f5-0e224785c9e5" containerID="3d46533d3f6c249ca600d3f4d986771628602ad7d225cce6313470e290e83923" exitCode=0 Jan 27 13:15:02 crc kubenswrapper[4900]: I0127 13:15:02.916974 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" event={"ID":"7f8d606c-71a1-417f-b6f5-0e224785c9e5","Type":"ContainerDied","Data":"3d46533d3f6c249ca600d3f4d986771628602ad7d225cce6313470e290e83923"} Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.405243 4900 util.go:48] "No ready sandbox for pod can be found. 
Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.405243 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f"
Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.574045 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f8d606c-71a1-417f-b6f5-0e224785c9e5-config-volume\") pod \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") "
Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.574180 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxxvx\" (UniqueName: \"kubernetes.io/projected/7f8d606c-71a1-417f-b6f5-0e224785c9e5-kube-api-access-pxxvx\") pod \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") "
Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.574466 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f8d606c-71a1-417f-b6f5-0e224785c9e5-secret-volume\") pod \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\" (UID: \"7f8d606c-71a1-417f-b6f5-0e224785c9e5\") "
Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.574966 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f8d606c-71a1-417f-b6f5-0e224785c9e5-config-volume" (OuterVolumeSpecName: "config-volume") pod "7f8d606c-71a1-417f-b6f5-0e224785c9e5" (UID: "7f8d606c-71a1-417f-b6f5-0e224785c9e5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.575879 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7f8d606c-71a1-417f-b6f5-0e224785c9e5-config-volume\") on node \"crc\" DevicePath \"\""
Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.580745 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8d606c-71a1-417f-b6f5-0e224785c9e5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7f8d606c-71a1-417f-b6f5-0e224785c9e5" (UID: "7f8d606c-71a1-417f-b6f5-0e224785c9e5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.581396 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f8d606c-71a1-417f-b6f5-0e224785c9e5-kube-api-access-pxxvx" (OuterVolumeSpecName: "kube-api-access-pxxvx") pod "7f8d606c-71a1-417f-b6f5-0e224785c9e5" (UID: "7f8d606c-71a1-417f-b6f5-0e224785c9e5"). InnerVolumeSpecName "kube-api-access-pxxvx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.680595 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7f8d606c-71a1-417f-b6f5-0e224785c9e5-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.680628 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxxvx\" (UniqueName: \"kubernetes.io/projected/7f8d606c-71a1-417f-b6f5-0e224785c9e5-kube-api-access-pxxvx\") on node \"crc\" DevicePath \"\"" Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.949090 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" event={"ID":"7f8d606c-71a1-417f-b6f5-0e224785c9e5","Type":"ContainerDied","Data":"b775cad854ed19c5e4bb22e43863fd474d89f6def2eb9ad175ed69d39c40a9f8"} Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.949135 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b775cad854ed19c5e4bb22e43863fd474d89f6def2eb9ad175ed69d39c40a9f8" Jan 27 13:15:04 crc kubenswrapper[4900]: I0127 13:15:04.949194 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f" Jan 27 13:15:05 crc kubenswrapper[4900]: I0127 13:15:05.514430 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv"] Jan 27 13:15:05 crc kubenswrapper[4900]: I0127 13:15:05.525865 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491950-nnsrv"] Jan 27 13:15:06 crc kubenswrapper[4900]: I0127 13:15:06.506718 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="524f0966-1edf-43b9-a459-415c57363842" path="/var/lib/kubelet/pods/524f0966-1edf-43b9-a459-415c57363842/volumes" Jan 27 13:15:22 crc kubenswrapper[4900]: I0127 13:15:22.372711 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:15:22 crc kubenswrapper[4900]: I0127 13:15:22.373448 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:15:22 crc kubenswrapper[4900]: I0127 13:15:22.373531 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:15:22 crc kubenswrapper[4900]: I0127 13:15:22.374809 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:15:22 crc kubenswrapper[4900]: I0127 13:15:22.374915 4900 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" gracePeriod=600 Jan 27 13:15:22 crc kubenswrapper[4900]: E0127 13:15:22.503693 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:15:23 crc kubenswrapper[4900]: I0127 13:15:23.159369 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" exitCode=0 Jan 27 13:15:23 crc kubenswrapper[4900]: I0127 13:15:23.159435 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579"} Jan 27 13:15:23 crc kubenswrapper[4900]: I0127 13:15:23.159779 4900 scope.go:117] "RemoveContainer" containerID="1f022fb6295bafc9407689be7f800205befa38981584e07b6408229b26335d7d" Jan 27 13:15:23 crc kubenswrapper[4900]: I0127 13:15:23.160752 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:15:23 crc kubenswrapper[4900]: E0127 13:15:23.161088 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:15:37 crc kubenswrapper[4900]: I0127 13:15:37.482111 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:15:37 crc kubenswrapper[4900]: E0127 13:15:37.482964 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:15:40 crc kubenswrapper[4900]: I0127 13:15:40.640092 4900 scope.go:117] "RemoveContainer" containerID="a2a2cc21e11008ab339d9c253948538245bf4d7cc6309cf970334ccde3b8143e" Jan 27 13:15:40 crc kubenswrapper[4900]: I0127 13:15:40.686458 4900 scope.go:117] "RemoveContainer" containerID="85343095c9b0a31fd607bd18b84b0887d7589d2028a2a6f9be37536f357b5300" Jan 27 13:15:40 crc kubenswrapper[4900]: I0127 13:15:40.731543 4900 scope.go:117] "RemoveContainer" containerID="14d672124e294852d3c82f5c9ea34d124e0ba2fd0b9a1d8090e069a66efdbb1a" Jan 27 13:15:48 crc kubenswrapper[4900]: I0127 13:15:48.482839 4900 scope.go:117] "RemoveContainer" 
containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:15:48 crc kubenswrapper[4900]: E0127 13:15:48.483851 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:16:02 crc kubenswrapper[4900]: I0127 13:16:02.482205 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:16:02 crc kubenswrapper[4900]: E0127 13:16:02.484665 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:16:17 crc kubenswrapper[4900]: I0127 13:16:17.482864 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:16:17 crc kubenswrapper[4900]: E0127 13:16:17.484047 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:16:31 crc kubenswrapper[4900]: I0127 13:16:31.484102 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:16:31 crc kubenswrapper[4900]: E0127 13:16:31.485738 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:16:40 crc kubenswrapper[4900]: I0127 13:16:40.841117 4900 scope.go:117] "RemoveContainer" containerID="c8f812bba499c7ee32a980dae9f50a2257aa66be157d85dcf41cdab9e5316beb" Jan 27 13:16:46 crc kubenswrapper[4900]: I0127 13:16:46.490795 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:16:46 crc kubenswrapper[4900]: E0127 13:16:46.491767 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:16:59 crc kubenswrapper[4900]: I0127 13:16:59.481869 4900 scope.go:117] "RemoveContainer" 
containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:16:59 crc kubenswrapper[4900]: E0127 13:16:59.483033 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:17:05 crc kubenswrapper[4900]: I0127 13:17:05.583943 4900 generic.go:334] "Generic (PLEG): container finished" podID="b9439553-d0f2-460f-b661-562e378a4b3c" containerID="61740dd7950b0dbe729b68c138ad6662d666b8aec75a1b64a785ce84c973c18c" exitCode=0 Jan 27 13:17:05 crc kubenswrapper[4900]: I0127 13:17:05.584076 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" event={"ID":"b9439553-d0f2-460f-b661-562e378a4b3c","Type":"ContainerDied","Data":"61740dd7950b0dbe729b68c138ad6662d666b8aec75a1b64a785ce84c973c18c"} Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.117295 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211392 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-inventory\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211488 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-combined-ca-bundle\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211557 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-1\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211589 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bspj\" (UniqueName: \"kubernetes.io/projected/b9439553-d0f2-460f-b661-562e378a4b3c-kube-api-access-7bspj\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211675 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-0\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211758 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-ssh-key-openstack-edpm-ipam\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: 
\"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211781 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-0\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211818 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b9439553-d0f2-460f-b661-562e378a4b3c-nova-extra-config-0\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.211843 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-1\") pod \"b9439553-d0f2-460f-b661-562e378a4b3c\" (UID: \"b9439553-d0f2-460f-b661-562e378a4b3c\") " Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.231945 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.232322 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9439553-d0f2-460f-b661-562e378a4b3c-kube-api-access-7bspj" (OuterVolumeSpecName: "kube-api-access-7bspj") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "kube-api-access-7bspj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.251758 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.252271 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.254235 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9439553-d0f2-460f-b661-562e378a4b3c-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "nova-extra-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.258422 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.259601 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.260803 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-inventory" (OuterVolumeSpecName: "inventory") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.261588 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "b9439553-d0f2-460f-b661-562e378a4b3c" (UID: "b9439553-d0f2-460f-b661-562e378a4b3c"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315810 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315852 4900 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315865 4900 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b9439553-d0f2-460f-b661-562e378a4b3c-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315875 4900 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315885 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315896 4900 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315905 4900 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315916 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bspj\" (UniqueName: \"kubernetes.io/projected/b9439553-d0f2-460f-b661-562e378a4b3c-kube-api-access-7bspj\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.315927 4900 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b9439553-d0f2-460f-b661-562e378a4b3c-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.606006 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" event={"ID":"b9439553-d0f2-460f-b661-562e378a4b3c","Type":"ContainerDied","Data":"b26bfabf0b46474ff41a7ef1150139a3e6c663d32307ad7f26927e858eb6e94b"} Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.606109 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b26bfabf0b46474ff41a7ef1150139a3e6c663d32307ad7f26927e858eb6e94b" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.606230 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-7xv88" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.730204 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g"] Jan 27 13:17:07 crc kubenswrapper[4900]: E0127 13:17:07.730898 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f8d606c-71a1-417f-b6f5-0e224785c9e5" containerName="collect-profiles" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.730920 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f8d606c-71a1-417f-b6f5-0e224785c9e5" containerName="collect-profiles" Jan 27 13:17:07 crc kubenswrapper[4900]: E0127 13:17:07.730941 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9439553-d0f2-460f-b661-562e378a4b3c" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.730949 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9439553-d0f2-460f-b661-562e378a4b3c" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.731332 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f8d606c-71a1-417f-b6f5-0e224785c9e5" containerName="collect-profiles" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.731370 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9439553-d0f2-460f-b661-562e378a4b3c" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.732493 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.735986 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.736644 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.736823 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.737008 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.739273 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.742351 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g"] Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.832406 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t28g\" (UniqueName: \"kubernetes.io/projected/179a88b6-3ce1-497d-9746-da99584ba03b-kube-api-access-8t28g\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.832475 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: 
\"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.832783 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.832957 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.833036 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.833185 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.833221 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.936532 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t28g\" (UniqueName: \"kubernetes.io/projected/179a88b6-3ce1-497d-9746-da99584ba03b-kube-api-access-8t28g\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.936614 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.936718 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.936795 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.936875 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.936972 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.937015 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.941397 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.941437 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.941615 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-inventory\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.941659 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.942447 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.943546 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:07 crc kubenswrapper[4900]: I0127 13:17:07.955102 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t28g\" (UniqueName: \"kubernetes.io/projected/179a88b6-3ce1-497d-9746-da99584ba03b-kube-api-access-8t28g\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4f94g\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:08 crc kubenswrapper[4900]: I0127 13:17:08.054376 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:17:08 crc kubenswrapper[4900]: I0127 13:17:08.671440 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g"] Jan 27 13:17:08 crc kubenswrapper[4900]: I0127 13:17:08.674746 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 13:17:09 crc kubenswrapper[4900]: I0127 13:17:09.632402 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" event={"ID":"179a88b6-3ce1-497d-9746-da99584ba03b","Type":"ContainerStarted","Data":"890e21bb9f6e02505824dce33384f9fef302eaa573aad98386009c2f9b600a14"} Jan 27 13:17:09 crc kubenswrapper[4900]: I0127 13:17:09.633208 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" event={"ID":"179a88b6-3ce1-497d-9746-da99584ba03b","Type":"ContainerStarted","Data":"d9ba56868aa3a7d4ce49e3512638aa19da0b06d7e89288cd7f50ccaacd923a22"} Jan 27 13:17:09 crc kubenswrapper[4900]: I0127 13:17:09.657312 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" podStartSLOduration=2.09570594 podStartE2EDuration="2.657287253s" podCreationTimestamp="2026-01-27 13:17:07 +0000 UTC" firstStartedPulling="2026-01-27 13:17:08.674433712 +0000 UTC m=+3055.911461922" lastFinishedPulling="2026-01-27 13:17:09.236015025 +0000 UTC m=+3056.473043235" observedRunningTime="2026-01-27 13:17:09.652524584 +0000 UTC m=+3056.889552784" watchObservedRunningTime="2026-01-27 13:17:09.657287253 +0000 UTC m=+3056.894315463" Jan 27 13:17:14 crc kubenswrapper[4900]: I0127 13:17:14.482978 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:17:14 crc kubenswrapper[4900]: E0127 13:17:14.483997 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:17:25 crc kubenswrapper[4900]: I0127 13:17:25.482970 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:17:25 crc kubenswrapper[4900]: E0127 13:17:25.484505 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:17:38 crc kubenswrapper[4900]: I0127 13:17:38.482786 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:17:38 crc kubenswrapper[4900]: E0127 13:17:38.483793 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:17:50 crc kubenswrapper[4900]: I0127 13:17:50.482405 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:17:50 crc kubenswrapper[4900]: E0127 13:17:50.483357 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:18:01 crc kubenswrapper[4900]: I0127 13:18:01.482662 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:18:01 crc kubenswrapper[4900]: E0127 13:18:01.483575 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:18:13 crc kubenswrapper[4900]: I0127 13:18:13.482539 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:18:13 crc kubenswrapper[4900]: E0127 13:18:13.483533 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:18:24 crc kubenswrapper[4900]: I0127 13:18:24.482146 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:18:24 crc kubenswrapper[4900]: E0127 13:18:24.482973 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:18:39 crc kubenswrapper[4900]: I0127 13:18:39.482779 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:18:39 crc kubenswrapper[4900]: E0127 13:18:39.484082 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:18:51 crc kubenswrapper[4900]: I0127 13:18:51.483844 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:18:51 crc kubenswrapper[4900]: E0127 13:18:51.485427 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:19:04 crc kubenswrapper[4900]: I0127 13:19:04.482579 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:19:04 crc kubenswrapper[4900]: E0127 13:19:04.483904 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:19:17 crc kubenswrapper[4900]: I0127 13:19:17.483222 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:19:17 crc kubenswrapper[4900]: E0127 13:19:17.484391 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:19:30 crc kubenswrapper[4900]: I0127 13:19:30.483168 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:19:30 crc kubenswrapper[4900]: E0127 13:19:30.484364 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:19:45 crc kubenswrapper[4900]: I0127 13:19:45.482274 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:19:45 crc kubenswrapper[4900]: E0127 13:19:45.483417 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:19:51 crc kubenswrapper[4900]: I0127 13:19:51.063375 4900 
generic.go:334] "Generic (PLEG): container finished" podID="179a88b6-3ce1-497d-9746-da99584ba03b" containerID="890e21bb9f6e02505824dce33384f9fef302eaa573aad98386009c2f9b600a14" exitCode=0 Jan 27 13:19:51 crc kubenswrapper[4900]: I0127 13:19:51.063926 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" event={"ID":"179a88b6-3ce1-497d-9746-da99584ba03b","Type":"ContainerDied","Data":"890e21bb9f6e02505824dce33384f9fef302eaa573aad98386009c2f9b600a14"} Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.575861 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.677624 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-0\") pod \"179a88b6-3ce1-497d-9746-da99584ba03b\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.677999 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-2\") pod \"179a88b6-3ce1-497d-9746-da99584ba03b\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.678098 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-telemetry-combined-ca-bundle\") pod \"179a88b6-3ce1-497d-9746-da99584ba03b\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.678302 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-1\") pod \"179a88b6-3ce1-497d-9746-da99584ba03b\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.678446 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ssh-key-openstack-edpm-ipam\") pod \"179a88b6-3ce1-497d-9746-da99584ba03b\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.678498 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8t28g\" (UniqueName: \"kubernetes.io/projected/179a88b6-3ce1-497d-9746-da99584ba03b-kube-api-access-8t28g\") pod \"179a88b6-3ce1-497d-9746-da99584ba03b\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.678584 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-inventory\") pod \"179a88b6-3ce1-497d-9746-da99584ba03b\" (UID: \"179a88b6-3ce1-497d-9746-da99584ba03b\") " Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.683816 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/179a88b6-3ce1-497d-9746-da99584ba03b-kube-api-access-8t28g" (OuterVolumeSpecName: "kube-api-access-8t28g") pod "179a88b6-3ce1-497d-9746-da99584ba03b" (UID: "179a88b6-3ce1-497d-9746-da99584ba03b"). InnerVolumeSpecName "kube-api-access-8t28g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.686208 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "179a88b6-3ce1-497d-9746-da99584ba03b" (UID: "179a88b6-3ce1-497d-9746-da99584ba03b"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.716906 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "179a88b6-3ce1-497d-9746-da99584ba03b" (UID: "179a88b6-3ce1-497d-9746-da99584ba03b"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.717540 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "179a88b6-3ce1-497d-9746-da99584ba03b" (UID: "179a88b6-3ce1-497d-9746-da99584ba03b"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.720806 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "179a88b6-3ce1-497d-9746-da99584ba03b" (UID: "179a88b6-3ce1-497d-9746-da99584ba03b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.725169 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-inventory" (OuterVolumeSpecName: "inventory") pod "179a88b6-3ce1-497d-9746-da99584ba03b" (UID: "179a88b6-3ce1-497d-9746-da99584ba03b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.727403 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "179a88b6-3ce1-497d-9746-da99584ba03b" (UID: "179a88b6-3ce1-497d-9746-da99584ba03b"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.782405 4900 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.782462 4900 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.782473 4900 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.782486 4900 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.782497 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.782510 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8t28g\" (UniqueName: \"kubernetes.io/projected/179a88b6-3ce1-497d-9746-da99584ba03b-kube-api-access-8t28g\") on node \"crc\" DevicePath \"\"" Jan 27 13:19:52 crc kubenswrapper[4900]: I0127 13:19:52.782524 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/179a88b6-3ce1-497d-9746-da99584ba03b-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.091719 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" event={"ID":"179a88b6-3ce1-497d-9746-da99584ba03b","Type":"ContainerDied","Data":"d9ba56868aa3a7d4ce49e3512638aa19da0b06d7e89288cd7f50ccaacd923a22"} Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.091769 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9ba56868aa3a7d4ce49e3512638aa19da0b06d7e89288cd7f50ccaacd923a22" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.092497 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4f94g" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.224316 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl"] Jan 27 13:19:53 crc kubenswrapper[4900]: E0127 13:19:53.224918 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="179a88b6-3ce1-497d-9746-da99584ba03b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.224938 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="179a88b6-3ce1-497d-9746-da99584ba03b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.225198 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="179a88b6-3ce1-497d-9746-da99584ba03b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.226122 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.230157 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.231773 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.231933 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.232172 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.232352 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.242715 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl"] Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.298808 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.299666 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76bfn\" (UniqueName: \"kubernetes.io/projected/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-kube-api-access-76bfn\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.299755 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.299938 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.300139 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.300346 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ssh-key-openstack-edpm-ipam\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.300432 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.404648 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.404759 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.404851 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ssh-key-openstack-edpm-ipam\") pod 
\"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.404900 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.405151 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.405197 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76bfn\" (UniqueName: \"kubernetes.io/projected/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-kube-api-access-76bfn\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.405229 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.412526 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.412907 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.413639 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " 
pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.414536 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.414793 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ssh-key-openstack-edpm-ipam\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.416111 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.427886 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76bfn\" (UniqueName: \"kubernetes.io/projected/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-kube-api-access-76bfn\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:53 crc kubenswrapper[4900]: I0127 13:19:53.550512 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:19:54 crc kubenswrapper[4900]: I0127 13:19:54.235415 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl"] Jan 27 13:19:55 crc kubenswrapper[4900]: I0127 13:19:55.163399 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" event={"ID":"e20161f2-b4d0-4e63-b7b7-7c359fef99a0","Type":"ContainerStarted","Data":"6eb123e4d4ed9a0619ebc3353be8f393858b61decdead20735083d8ffcdbb8f5"} Jan 27 13:19:56 crc kubenswrapper[4900]: I0127 13:19:56.178797 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" event={"ID":"e20161f2-b4d0-4e63-b7b7-7c359fef99a0","Type":"ContainerStarted","Data":"6ac90d5593c2cc503065122e0e994ed6a2d05715f59fa35688ef2d8996dfa122"} Jan 27 13:19:56 crc kubenswrapper[4900]: I0127 13:19:56.214299 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" podStartSLOduration=2.357335777 podStartE2EDuration="3.214270161s" podCreationTimestamp="2026-01-27 13:19:53 +0000 UTC" firstStartedPulling="2026-01-27 13:19:54.274349111 +0000 UTC m=+3221.511377321" lastFinishedPulling="2026-01-27 13:19:55.131283495 +0000 UTC m=+3222.368311705" observedRunningTime="2026-01-27 13:19:56.208096151 +0000 UTC m=+3223.445124361" watchObservedRunningTime="2026-01-27 13:19:56.214270161 +0000 UTC m=+3223.451298371" Jan 27 13:20:00 crc kubenswrapper[4900]: I0127 13:20:00.484080 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:20:00 crc kubenswrapper[4900]: E0127 13:20:00.491776 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:20:12 crc kubenswrapper[4900]: I0127 13:20:12.482869 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:20:12 crc kubenswrapper[4900]: E0127 13:20:12.484005 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:20:12 crc kubenswrapper[4900]: I0127 13:20:12.937872 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8q2cx"] Jan 27 13:20:12 crc kubenswrapper[4900]: I0127 13:20:12.943209 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:12 crc kubenswrapper[4900]: I0127 13:20:12.957662 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8q2cx"] Jan 27 13:20:12 crc kubenswrapper[4900]: I0127 13:20:12.974673 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q227n\" (UniqueName: \"kubernetes.io/projected/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-kube-api-access-q227n\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:12 crc kubenswrapper[4900]: I0127 13:20:12.975254 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-utilities\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:12 crc kubenswrapper[4900]: I0127 13:20:12.975850 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-catalog-content\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:13 crc kubenswrapper[4900]: I0127 13:20:13.081923 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q227n\" (UniqueName: \"kubernetes.io/projected/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-kube-api-access-q227n\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:13 crc kubenswrapper[4900]: I0127 13:20:13.082299 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-utilities\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:13 crc kubenswrapper[4900]: I0127 13:20:13.082630 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-catalog-content\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:13 crc kubenswrapper[4900]: I0127 13:20:13.083958 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-catalog-content\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:13 crc kubenswrapper[4900]: I0127 13:20:13.084413 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-utilities\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:13 crc kubenswrapper[4900]: I0127 13:20:13.126642 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-q227n\" (UniqueName: \"kubernetes.io/projected/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-kube-api-access-q227n\") pod \"redhat-operators-8q2cx\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:13 crc kubenswrapper[4900]: I0127 13:20:13.280881 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:13 crc kubenswrapper[4900]: I0127 13:20:13.871694 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8q2cx"] Jan 27 13:20:14 crc kubenswrapper[4900]: I0127 13:20:14.840511 4900 generic.go:334] "Generic (PLEG): container finished" podID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerID="16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b" exitCode=0 Jan 27 13:20:14 crc kubenswrapper[4900]: I0127 13:20:14.840758 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8q2cx" event={"ID":"ad45c5af-43ee-4d0b-99ca-89e03d86ceae","Type":"ContainerDied","Data":"16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b"} Jan 27 13:20:14 crc kubenswrapper[4900]: I0127 13:20:14.840858 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8q2cx" event={"ID":"ad45c5af-43ee-4d0b-99ca-89e03d86ceae","Type":"ContainerStarted","Data":"51cdb6245b30afe5c68fdc1afe70cf23525aada62cf3e42408ef156001d7240d"} Jan 27 13:20:15 crc kubenswrapper[4900]: I0127 13:20:15.853495 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8q2cx" event={"ID":"ad45c5af-43ee-4d0b-99ca-89e03d86ceae","Type":"ContainerStarted","Data":"c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17"} Jan 27 13:20:21 crc kubenswrapper[4900]: I0127 13:20:21.928765 4900 generic.go:334] "Generic (PLEG): container finished" podID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerID="c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17" exitCode=0 Jan 27 13:20:21 crc kubenswrapper[4900]: I0127 13:20:21.928808 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8q2cx" event={"ID":"ad45c5af-43ee-4d0b-99ca-89e03d86ceae","Type":"ContainerDied","Data":"c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17"} Jan 27 13:20:22 crc kubenswrapper[4900]: I0127 13:20:22.944333 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8q2cx" event={"ID":"ad45c5af-43ee-4d0b-99ca-89e03d86ceae","Type":"ContainerStarted","Data":"ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169"} Jan 27 13:20:22 crc kubenswrapper[4900]: I0127 13:20:22.978409 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8q2cx" podStartSLOduration=3.366288762 podStartE2EDuration="10.978382384s" podCreationTimestamp="2026-01-27 13:20:12 +0000 UTC" firstStartedPulling="2026-01-27 13:20:14.845773265 +0000 UTC m=+3242.082801485" lastFinishedPulling="2026-01-27 13:20:22.457866897 +0000 UTC m=+3249.694895107" observedRunningTime="2026-01-27 13:20:22.964993254 +0000 UTC m=+3250.202021464" watchObservedRunningTime="2026-01-27 13:20:22.978382384 +0000 UTC m=+3250.215410594" Jan 27 13:20:23 crc kubenswrapper[4900]: I0127 13:20:23.281618 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8q2cx" 
Jan 27 13:20:23 crc kubenswrapper[4900]: I0127 13:20:23.281669 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:24 crc kubenswrapper[4900]: I0127 13:20:24.341706 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8q2cx" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="registry-server" probeResult="failure" output=< Jan 27 13:20:24 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:20:24 crc kubenswrapper[4900]: > Jan 27 13:20:26 crc kubenswrapper[4900]: I0127 13:20:26.497330 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:20:27 crc kubenswrapper[4900]: I0127 13:20:27.263865 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"c597ab2a68a79b51e2bb511022ae0fb5a8221544d64818accf88cd1d6f181d1f"} Jan 27 13:20:33 crc kubenswrapper[4900]: I0127 13:20:33.362933 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:33 crc kubenswrapper[4900]: I0127 13:20:33.438164 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:33 crc kubenswrapper[4900]: I0127 13:20:33.615908 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8q2cx"] Jan 27 13:20:35 crc kubenswrapper[4900]: I0127 13:20:35.460410 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8q2cx" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="registry-server" containerID="cri-o://ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169" gracePeriod=2 Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.016801 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.195477 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q227n\" (UniqueName: \"kubernetes.io/projected/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-kube-api-access-q227n\") pod \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.195605 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-catalog-content\") pod \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.196133 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-utilities\") pod \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\" (UID: \"ad45c5af-43ee-4d0b-99ca-89e03d86ceae\") " Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.197362 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-utilities" (OuterVolumeSpecName: "utilities") pod "ad45c5af-43ee-4d0b-99ca-89e03d86ceae" (UID: "ad45c5af-43ee-4d0b-99ca-89e03d86ceae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.203655 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-kube-api-access-q227n" (OuterVolumeSpecName: "kube-api-access-q227n") pod "ad45c5af-43ee-4d0b-99ca-89e03d86ceae" (UID: "ad45c5af-43ee-4d0b-99ca-89e03d86ceae"). InnerVolumeSpecName "kube-api-access-q227n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.323109 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q227n\" (UniqueName: \"kubernetes.io/projected/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-kube-api-access-q227n\") on node \"crc\" DevicePath \"\"" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.323179 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.336203 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad45c5af-43ee-4d0b-99ca-89e03d86ceae" (UID: "ad45c5af-43ee-4d0b-99ca-89e03d86ceae"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.426817 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad45c5af-43ee-4d0b-99ca-89e03d86ceae-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.473916 4900 generic.go:334] "Generic (PLEG): container finished" podID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerID="ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169" exitCode=0 Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.473971 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8q2cx" event={"ID":"ad45c5af-43ee-4d0b-99ca-89e03d86ceae","Type":"ContainerDied","Data":"ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169"} Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.473990 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8q2cx" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.474009 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8q2cx" event={"ID":"ad45c5af-43ee-4d0b-99ca-89e03d86ceae","Type":"ContainerDied","Data":"51cdb6245b30afe5c68fdc1afe70cf23525aada62cf3e42408ef156001d7240d"} Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.474028 4900 scope.go:117] "RemoveContainer" containerID="ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.513504 4900 scope.go:117] "RemoveContainer" containerID="c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.523254 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8q2cx"] Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.537838 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8q2cx"] Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.562457 4900 scope.go:117] "RemoveContainer" containerID="16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.605333 4900 scope.go:117] "RemoveContainer" containerID="ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169" Jan 27 13:20:36 crc kubenswrapper[4900]: E0127 13:20:36.606842 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169\": container with ID starting with ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169 not found: ID does not exist" containerID="ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.606938 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169"} err="failed to get container status \"ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169\": rpc error: code = NotFound desc = could not find container \"ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169\": container with ID starting with ca95a7cdef5f8095cc685a8c68480f798d09c341589b3e165fc4ae05fa937169 not found: ID does not exist" Jan 27 13:20:36 crc 
kubenswrapper[4900]: I0127 13:20:36.607009 4900 scope.go:117] "RemoveContainer" containerID="c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17" Jan 27 13:20:36 crc kubenswrapper[4900]: E0127 13:20:36.608322 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17\": container with ID starting with c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17 not found: ID does not exist" containerID="c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.608382 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17"} err="failed to get container status \"c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17\": rpc error: code = NotFound desc = could not find container \"c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17\": container with ID starting with c7c77225371d5f294735c57728af7860bc886d55e2b2fb5da7ff0f0b7400ee17 not found: ID does not exist" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.608445 4900 scope.go:117] "RemoveContainer" containerID="16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b" Jan 27 13:20:36 crc kubenswrapper[4900]: E0127 13:20:36.608967 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b\": container with ID starting with 16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b not found: ID does not exist" containerID="16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b" Jan 27 13:20:36 crc kubenswrapper[4900]: I0127 13:20:36.609014 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b"} err="failed to get container status \"16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b\": rpc error: code = NotFound desc = could not find container \"16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b\": container with ID starting with 16f096dc35fde1024a04028f6547dde077bda38cd3083e71394921bf184d313b not found: ID does not exist" Jan 27 13:20:38 crc kubenswrapper[4900]: I0127 13:20:38.496124 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" path="/var/lib/kubelet/pods/ad45c5af-43ee-4d0b-99ca-89e03d86ceae/volumes" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.198522 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k6ghf"] Jan 27 13:21:36 crc kubenswrapper[4900]: E0127 13:21:36.199790 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="extract-utilities" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.199805 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="extract-utilities" Jan 27 13:21:36 crc kubenswrapper[4900]: E0127 13:21:36.199843 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="registry-server" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.199849 4900 
state_mem.go:107] "Deleted CPUSet assignment" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="registry-server" Jan 27 13:21:36 crc kubenswrapper[4900]: E0127 13:21:36.199868 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="extract-content" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.199876 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="extract-content" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.200113 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad45c5af-43ee-4d0b-99ca-89e03d86ceae" containerName="registry-server" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.201962 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.223633 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k6ghf"] Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.355349 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-utilities\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.355978 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-catalog-content\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.356197 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwb6b\" (UniqueName: \"kubernetes.io/projected/7f197dba-63db-49f4-a9c8-877c4acbeabc-kube-api-access-nwb6b\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.395688 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sfjfc"] Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.416463 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.462078 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-catalog-content\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.462175 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwb6b\" (UniqueName: \"kubernetes.io/projected/7f197dba-63db-49f4-a9c8-877c4acbeabc-kube-api-access-nwb6b\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.462399 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-utilities\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.466680 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-utilities\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.467394 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfjfc"] Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.467705 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-catalog-content\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.521024 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwb6b\" (UniqueName: \"kubernetes.io/projected/7f197dba-63db-49f4-a9c8-877c4acbeabc-kube-api-access-nwb6b\") pod \"certified-operators-k6ghf\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.527385 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.567402 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-catalog-content\") pod \"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.567816 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-utilities\") pod \"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.567996 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjdf6\" (UniqueName: \"kubernetes.io/projected/3e851c54-9261-4aec-88b9-a0beb014d8cd-kube-api-access-gjdf6\") pod \"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.680219 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjdf6\" (UniqueName: \"kubernetes.io/projected/3e851c54-9261-4aec-88b9-a0beb014d8cd-kube-api-access-gjdf6\") pod \"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.680830 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-catalog-content\") pod \"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.681091 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-utilities\") pod \"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.682502 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-catalog-content\") pod \"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.684012 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-utilities\") pod \"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:36 crc kubenswrapper[4900]: I0127 13:21:36.758777 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjdf6\" (UniqueName: \"kubernetes.io/projected/3e851c54-9261-4aec-88b9-a0beb014d8cd-kube-api-access-gjdf6\") pod 
\"redhat-marketplace-sfjfc\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:37 crc kubenswrapper[4900]: I0127 13:21:37.061614 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:37 crc kubenswrapper[4900]: I0127 13:21:37.161273 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k6ghf"] Jan 27 13:21:37 crc kubenswrapper[4900]: I0127 13:21:37.285603 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6ghf" event={"ID":"7f197dba-63db-49f4-a9c8-877c4acbeabc","Type":"ContainerStarted","Data":"a8fb2cf622a653746309ccaa7cf43a28c67486ae2def983510f9fdc7c0b91a9d"} Jan 27 13:21:37 crc kubenswrapper[4900]: I0127 13:21:37.783839 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfjfc"] Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.302416 4900 generic.go:334] "Generic (PLEG): container finished" podID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerID="10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49" exitCode=0 Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.302486 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfjfc" event={"ID":"3e851c54-9261-4aec-88b9-a0beb014d8cd","Type":"ContainerDied","Data":"10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49"} Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.304559 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfjfc" event={"ID":"3e851c54-9261-4aec-88b9-a0beb014d8cd","Type":"ContainerStarted","Data":"119bd0c47bdb5ec4a43e605d1c4ecc4d2de17b946af58b5e469a4615acbb30bc"} Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.308766 4900 generic.go:334] "Generic (PLEG): container finished" podID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerID="6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90" exitCode=0 Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.308829 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6ghf" event={"ID":"7f197dba-63db-49f4-a9c8-877c4acbeabc","Type":"ContainerDied","Data":"6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90"} Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.599290 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-92kp6"] Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.604493 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.620001 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-92kp6"] Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.640333 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qgsk\" (UniqueName: \"kubernetes.io/projected/30d76435-7dcd-4fad-b4f3-ac2b562d5649-kube-api-access-2qgsk\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.640684 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-utilities\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.640826 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-catalog-content\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.742925 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qgsk\" (UniqueName: \"kubernetes.io/projected/30d76435-7dcd-4fad-b4f3-ac2b562d5649-kube-api-access-2qgsk\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.743444 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-utilities\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.743487 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-catalog-content\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.744081 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-catalog-content\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.744173 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-utilities\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.787999 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2qgsk\" (UniqueName: \"kubernetes.io/projected/30d76435-7dcd-4fad-b4f3-ac2b562d5649-kube-api-access-2qgsk\") pod \"community-operators-92kp6\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:38 crc kubenswrapper[4900]: I0127 13:21:38.938750 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:39 crc kubenswrapper[4900]: W0127 13:21:39.691181 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30d76435_7dcd_4fad_b4f3_ac2b562d5649.slice/crio-fdb41479798d50626340f4b7e97402bc294fb6bf45aa18955ae49249cd62f9cd WatchSource:0}: Error finding container fdb41479798d50626340f4b7e97402bc294fb6bf45aa18955ae49249cd62f9cd: Status 404 returned error can't find the container with id fdb41479798d50626340f4b7e97402bc294fb6bf45aa18955ae49249cd62f9cd Jan 27 13:21:39 crc kubenswrapper[4900]: I0127 13:21:39.696544 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-92kp6"] Jan 27 13:21:40 crc kubenswrapper[4900]: I0127 13:21:40.442787 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6ghf" event={"ID":"7f197dba-63db-49f4-a9c8-877c4acbeabc","Type":"ContainerStarted","Data":"693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3"} Jan 27 13:21:40 crc kubenswrapper[4900]: I0127 13:21:40.445460 4900 generic.go:334] "Generic (PLEG): container finished" podID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerID="35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e" exitCode=0 Jan 27 13:21:40 crc kubenswrapper[4900]: I0127 13:21:40.445557 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92kp6" event={"ID":"30d76435-7dcd-4fad-b4f3-ac2b562d5649","Type":"ContainerDied","Data":"35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e"} Jan 27 13:21:40 crc kubenswrapper[4900]: I0127 13:21:40.445590 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92kp6" event={"ID":"30d76435-7dcd-4fad-b4f3-ac2b562d5649","Type":"ContainerStarted","Data":"fdb41479798d50626340f4b7e97402bc294fb6bf45aa18955ae49249cd62f9cd"} Jan 27 13:21:40 crc kubenswrapper[4900]: I0127 13:21:40.458486 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfjfc" event={"ID":"3e851c54-9261-4aec-88b9-a0beb014d8cd","Type":"ContainerStarted","Data":"87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031"} Jan 27 13:21:41 crc kubenswrapper[4900]: I0127 13:21:41.480665 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfjfc" event={"ID":"3e851c54-9261-4aec-88b9-a0beb014d8cd","Type":"ContainerDied","Data":"87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031"} Jan 27 13:21:41 crc kubenswrapper[4900]: I0127 13:21:41.484143 4900 generic.go:334] "Generic (PLEG): container finished" podID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerID="87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031" exitCode=0 Jan 27 13:21:41 crc kubenswrapper[4900]: I0127 13:21:41.504482 4900 generic.go:334] "Generic (PLEG): container finished" podID="7f197dba-63db-49f4-a9c8-877c4acbeabc" 
containerID="693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3" exitCode=0 Jan 27 13:21:41 crc kubenswrapper[4900]: I0127 13:21:41.504546 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6ghf" event={"ID":"7f197dba-63db-49f4-a9c8-877c4acbeabc","Type":"ContainerDied","Data":"693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3"} Jan 27 13:21:42 crc kubenswrapper[4900]: I0127 13:21:42.526372 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92kp6" event={"ID":"30d76435-7dcd-4fad-b4f3-ac2b562d5649","Type":"ContainerStarted","Data":"a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a"} Jan 27 13:21:43 crc kubenswrapper[4900]: I0127 13:21:43.544946 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfjfc" event={"ID":"3e851c54-9261-4aec-88b9-a0beb014d8cd","Type":"ContainerStarted","Data":"77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50"} Jan 27 13:21:43 crc kubenswrapper[4900]: I0127 13:21:43.555921 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6ghf" event={"ID":"7f197dba-63db-49f4-a9c8-877c4acbeabc","Type":"ContainerStarted","Data":"e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8"} Jan 27 13:21:43 crc kubenswrapper[4900]: I0127 13:21:43.604615 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sfjfc" podStartSLOduration=3.78867689 podStartE2EDuration="7.604587768s" podCreationTimestamp="2026-01-27 13:21:36 +0000 UTC" firstStartedPulling="2026-01-27 13:21:38.306769948 +0000 UTC m=+3325.543798158" lastFinishedPulling="2026-01-27 13:21:42.122680826 +0000 UTC m=+3329.359709036" observedRunningTime="2026-01-27 13:21:43.574686087 +0000 UTC m=+3330.811714297" watchObservedRunningTime="2026-01-27 13:21:43.604587768 +0000 UTC m=+3330.841615978" Jan 27 13:21:43 crc kubenswrapper[4900]: I0127 13:21:43.608478 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k6ghf" podStartSLOduration=3.815025567 podStartE2EDuration="7.608465381s" podCreationTimestamp="2026-01-27 13:21:36 +0000 UTC" firstStartedPulling="2026-01-27 13:21:38.312803323 +0000 UTC m=+3325.549831533" lastFinishedPulling="2026-01-27 13:21:42.106243127 +0000 UTC m=+3329.343271347" observedRunningTime="2026-01-27 13:21:43.596328918 +0000 UTC m=+3330.833357128" watchObservedRunningTime="2026-01-27 13:21:43.608465381 +0000 UTC m=+3330.845493591" Jan 27 13:21:44 crc kubenswrapper[4900]: I0127 13:21:44.572776 4900 generic.go:334] "Generic (PLEG): container finished" podID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerID="a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a" exitCode=0 Jan 27 13:21:44 crc kubenswrapper[4900]: I0127 13:21:44.572874 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92kp6" event={"ID":"30d76435-7dcd-4fad-b4f3-ac2b562d5649","Type":"ContainerDied","Data":"a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a"} Jan 27 13:21:45 crc kubenswrapper[4900]: I0127 13:21:45.587776 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92kp6" event={"ID":"30d76435-7dcd-4fad-b4f3-ac2b562d5649","Type":"ContainerStarted","Data":"d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69"} Jan 
27 13:21:45 crc kubenswrapper[4900]: I0127 13:21:45.608885 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-92kp6" podStartSLOduration=2.8652409 podStartE2EDuration="7.608867393s" podCreationTimestamp="2026-01-27 13:21:38 +0000 UTC" firstStartedPulling="2026-01-27 13:21:40.453616783 +0000 UTC m=+3327.690644993" lastFinishedPulling="2026-01-27 13:21:45.197243276 +0000 UTC m=+3332.434271486" observedRunningTime="2026-01-27 13:21:45.607318168 +0000 UTC m=+3332.844346378" watchObservedRunningTime="2026-01-27 13:21:45.608867393 +0000 UTC m=+3332.845895603" Jan 27 13:21:46 crc kubenswrapper[4900]: I0127 13:21:46.530771 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:46 crc kubenswrapper[4900]: I0127 13:21:46.530845 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:47 crc kubenswrapper[4900]: I0127 13:21:47.063846 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:47 crc kubenswrapper[4900]: I0127 13:21:47.064244 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:47 crc kubenswrapper[4900]: I0127 13:21:47.126369 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:47 crc kubenswrapper[4900]: I0127 13:21:47.595420 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-k6ghf" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="registry-server" probeResult="failure" output=< Jan 27 13:21:47 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:21:47 crc kubenswrapper[4900]: > Jan 27 13:21:47 crc kubenswrapper[4900]: I0127 13:21:47.711587 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:48 crc kubenswrapper[4900]: I0127 13:21:48.939160 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:48 crc kubenswrapper[4900]: I0127 13:21:48.939688 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:49 crc kubenswrapper[4900]: I0127 13:21:49.027075 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:50 crc kubenswrapper[4900]: I0127 13:21:50.707466 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:51 crc kubenswrapper[4900]: I0127 13:21:51.779277 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfjfc"] Jan 27 13:21:51 crc kubenswrapper[4900]: I0127 13:21:51.780134 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sfjfc" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerName="registry-server" containerID="cri-o://77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50" gracePeriod=2 Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.330290 4900 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.529376 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-utilities\") pod \"3e851c54-9261-4aec-88b9-a0beb014d8cd\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.529559 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjdf6\" (UniqueName: \"kubernetes.io/projected/3e851c54-9261-4aec-88b9-a0beb014d8cd-kube-api-access-gjdf6\") pod \"3e851c54-9261-4aec-88b9-a0beb014d8cd\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.529881 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-catalog-content\") pod \"3e851c54-9261-4aec-88b9-a0beb014d8cd\" (UID: \"3e851c54-9261-4aec-88b9-a0beb014d8cd\") " Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.530206 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-utilities" (OuterVolumeSpecName: "utilities") pod "3e851c54-9261-4aec-88b9-a0beb014d8cd" (UID: "3e851c54-9261-4aec-88b9-a0beb014d8cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.533836 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.540792 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e851c54-9261-4aec-88b9-a0beb014d8cd-kube-api-access-gjdf6" (OuterVolumeSpecName: "kube-api-access-gjdf6") pod "3e851c54-9261-4aec-88b9-a0beb014d8cd" (UID: "3e851c54-9261-4aec-88b9-a0beb014d8cd"). InnerVolumeSpecName "kube-api-access-gjdf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.564393 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3e851c54-9261-4aec-88b9-a0beb014d8cd" (UID: "3e851c54-9261-4aec-88b9-a0beb014d8cd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.637197 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjdf6\" (UniqueName: \"kubernetes.io/projected/3e851c54-9261-4aec-88b9-a0beb014d8cd-kube-api-access-gjdf6\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.637267 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e851c54-9261-4aec-88b9-a0beb014d8cd-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.681090 4900 generic.go:334] "Generic (PLEG): container finished" podID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerID="77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50" exitCode=0 Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.681174 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfjfc" event={"ID":"3e851c54-9261-4aec-88b9-a0beb014d8cd","Type":"ContainerDied","Data":"77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50"} Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.681192 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sfjfc" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.681236 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sfjfc" event={"ID":"3e851c54-9261-4aec-88b9-a0beb014d8cd","Type":"ContainerDied","Data":"119bd0c47bdb5ec4a43e605d1c4ecc4d2de17b946af58b5e469a4615acbb30bc"} Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.681270 4900 scope.go:117] "RemoveContainer" containerID="77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.724810 4900 scope.go:117] "RemoveContainer" containerID="87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.739792 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfjfc"] Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.774868 4900 scope.go:117] "RemoveContainer" containerID="10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.776002 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sfjfc"] Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.839762 4900 scope.go:117] "RemoveContainer" containerID="77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50" Jan 27 13:21:52 crc kubenswrapper[4900]: E0127 13:21:52.840619 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50\": container with ID starting with 77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50 not found: ID does not exist" containerID="77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.840693 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50"} err="failed to get container status 
\"77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50\": rpc error: code = NotFound desc = could not find container \"77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50\": container with ID starting with 77a6c4d202fd784f24ada7a13ae31bff3355d49c073bf9923baf05e6a248fb50 not found: ID does not exist" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.840743 4900 scope.go:117] "RemoveContainer" containerID="87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031" Jan 27 13:21:52 crc kubenswrapper[4900]: E0127 13:21:52.841409 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031\": container with ID starting with 87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031 not found: ID does not exist" containerID="87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.841447 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031"} err="failed to get container status \"87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031\": rpc error: code = NotFound desc = could not find container \"87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031\": container with ID starting with 87c109d263ba0a3fb76d6acf091b7e557393bc6d25be533e197f49a5a8a52031 not found: ID does not exist" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.841475 4900 scope.go:117] "RemoveContainer" containerID="10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49" Jan 27 13:21:52 crc kubenswrapper[4900]: E0127 13:21:52.842177 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49\": container with ID starting with 10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49 not found: ID does not exist" containerID="10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49" Jan 27 13:21:52 crc kubenswrapper[4900]: I0127 13:21:52.842439 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49"} err="failed to get container status \"10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49\": rpc error: code = NotFound desc = could not find container \"10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49\": container with ID starting with 10293a9b2ede87d84e8e2761cf78cb0cb0f392ea925207679817405867492e49 not found: ID does not exist" Jan 27 13:21:53 crc kubenswrapper[4900]: I0127 13:21:53.581089 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-92kp6"] Jan 27 13:21:53 crc kubenswrapper[4900]: I0127 13:21:53.581747 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-92kp6" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerName="registry-server" containerID="cri-o://d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69" gracePeriod=2 Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.317016 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.417762 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-catalog-content\") pod \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.417898 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-utilities\") pod \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.417951 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qgsk\" (UniqueName: \"kubernetes.io/projected/30d76435-7dcd-4fad-b4f3-ac2b562d5649-kube-api-access-2qgsk\") pod \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\" (UID: \"30d76435-7dcd-4fad-b4f3-ac2b562d5649\") " Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.419228 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-utilities" (OuterVolumeSpecName: "utilities") pod "30d76435-7dcd-4fad-b4f3-ac2b562d5649" (UID: "30d76435-7dcd-4fad-b4f3-ac2b562d5649"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.426289 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30d76435-7dcd-4fad-b4f3-ac2b562d5649-kube-api-access-2qgsk" (OuterVolumeSpecName: "kube-api-access-2qgsk") pod "30d76435-7dcd-4fad-b4f3-ac2b562d5649" (UID: "30d76435-7dcd-4fad-b4f3-ac2b562d5649"). InnerVolumeSpecName "kube-api-access-2qgsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.476085 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "30d76435-7dcd-4fad-b4f3-ac2b562d5649" (UID: "30d76435-7dcd-4fad-b4f3-ac2b562d5649"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.506012 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" path="/var/lib/kubelet/pods/3e851c54-9261-4aec-88b9-a0beb014d8cd/volumes" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.521358 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.521862 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30d76435-7dcd-4fad-b4f3-ac2b562d5649-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.521947 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qgsk\" (UniqueName: \"kubernetes.io/projected/30d76435-7dcd-4fad-b4f3-ac2b562d5649-kube-api-access-2qgsk\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.812505 4900 generic.go:334] "Generic (PLEG): container finished" podID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerID="d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69" exitCode=0 Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.812578 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-92kp6" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.812598 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92kp6" event={"ID":"30d76435-7dcd-4fad-b4f3-ac2b562d5649","Type":"ContainerDied","Data":"d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69"} Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.812931 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-92kp6" event={"ID":"30d76435-7dcd-4fad-b4f3-ac2b562d5649","Type":"ContainerDied","Data":"fdb41479798d50626340f4b7e97402bc294fb6bf45aa18955ae49249cd62f9cd"} Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.812952 4900 scope.go:117] "RemoveContainer" containerID="d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.854647 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-92kp6"] Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.855404 4900 scope.go:117] "RemoveContainer" containerID="a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.871613 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-92kp6"] Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.901433 4900 scope.go:117] "RemoveContainer" containerID="35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.947525 4900 scope.go:117] "RemoveContainer" containerID="d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69" Jan 27 13:21:54 crc kubenswrapper[4900]: E0127 13:21:54.948135 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69\": container with ID 
starting with d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69 not found: ID does not exist" containerID="d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.948195 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69"} err="failed to get container status \"d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69\": rpc error: code = NotFound desc = could not find container \"d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69\": container with ID starting with d46292f00ad889788c12b0887e4d698afbb4efc86782e1a6c2157b22ec65af69 not found: ID does not exist" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.948227 4900 scope.go:117] "RemoveContainer" containerID="a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a" Jan 27 13:21:54 crc kubenswrapper[4900]: E0127 13:21:54.948879 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a\": container with ID starting with a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a not found: ID does not exist" containerID="a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.948908 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a"} err="failed to get container status \"a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a\": rpc error: code = NotFound desc = could not find container \"a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a\": container with ID starting with a74456ac3a2058462ec97a14c635b06c6d7a058eabadcab266b8b41360e63d6a not found: ID does not exist" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.948924 4900 scope.go:117] "RemoveContainer" containerID="35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e" Jan 27 13:21:54 crc kubenswrapper[4900]: E0127 13:21:54.949214 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e\": container with ID starting with 35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e not found: ID does not exist" containerID="35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e" Jan 27 13:21:54 crc kubenswrapper[4900]: I0127 13:21:54.949239 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e"} err="failed to get container status \"35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e\": rpc error: code = NotFound desc = could not find container \"35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e\": container with ID starting with 35d8335f4b4b8731396480ab4406840a29948ee10b6c38c3c612063d12f64d6e not found: ID does not exist" Jan 27 13:21:56 crc kubenswrapper[4900]: I0127 13:21:56.495997 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" path="/var/lib/kubelet/pods/30d76435-7dcd-4fad-b4f3-ac2b562d5649/volumes" Jan 27 13:21:56 crc kubenswrapper[4900]: I0127 
13:21:56.591475 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:56 crc kubenswrapper[4900]: I0127 13:21:56.650745 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:57 crc kubenswrapper[4900]: I0127 13:21:57.983906 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k6ghf"] Jan 27 13:21:57 crc kubenswrapper[4900]: I0127 13:21:57.984192 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k6ghf" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="registry-server" containerID="cri-o://e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8" gracePeriod=2 Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.607648 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.655362 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwb6b\" (UniqueName: \"kubernetes.io/projected/7f197dba-63db-49f4-a9c8-877c4acbeabc-kube-api-access-nwb6b\") pod \"7f197dba-63db-49f4-a9c8-877c4acbeabc\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.655522 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-catalog-content\") pod \"7f197dba-63db-49f4-a9c8-877c4acbeabc\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.655878 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-utilities\") pod \"7f197dba-63db-49f4-a9c8-877c4acbeabc\" (UID: \"7f197dba-63db-49f4-a9c8-877c4acbeabc\") " Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.656407 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-utilities" (OuterVolumeSpecName: "utilities") pod "7f197dba-63db-49f4-a9c8-877c4acbeabc" (UID: "7f197dba-63db-49f4-a9c8-877c4acbeabc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.656941 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.666268 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f197dba-63db-49f4-a9c8-877c4acbeabc-kube-api-access-nwb6b" (OuterVolumeSpecName: "kube-api-access-nwb6b") pod "7f197dba-63db-49f4-a9c8-877c4acbeabc" (UID: "7f197dba-63db-49f4-a9c8-877c4acbeabc"). InnerVolumeSpecName "kube-api-access-nwb6b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.706582 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7f197dba-63db-49f4-a9c8-877c4acbeabc" (UID: "7f197dba-63db-49f4-a9c8-877c4acbeabc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.759432 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwb6b\" (UniqueName: \"kubernetes.io/projected/7f197dba-63db-49f4-a9c8-877c4acbeabc-kube-api-access-nwb6b\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.759466 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f197dba-63db-49f4-a9c8-877c4acbeabc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.859467 4900 generic.go:334] "Generic (PLEG): container finished" podID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerID="e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8" exitCode=0 Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.859529 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6ghf" event={"ID":"7f197dba-63db-49f4-a9c8-877c4acbeabc","Type":"ContainerDied","Data":"e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8"} Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.859567 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k6ghf" event={"ID":"7f197dba-63db-49f4-a9c8-877c4acbeabc","Type":"ContainerDied","Data":"a8fb2cf622a653746309ccaa7cf43a28c67486ae2def983510f9fdc7c0b91a9d"} Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.859570 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k6ghf" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.859584 4900 scope.go:117] "RemoveContainer" containerID="e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.888371 4900 scope.go:117] "RemoveContainer" containerID="693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.908901 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k6ghf"] Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.920470 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k6ghf"] Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.942593 4900 scope.go:117] "RemoveContainer" containerID="6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.992253 4900 scope.go:117] "RemoveContainer" containerID="e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8" Jan 27 13:21:58 crc kubenswrapper[4900]: E0127 13:21:58.992826 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8\": container with ID starting with e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8 not found: ID does not exist" containerID="e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.992861 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8"} err="failed to get container status \"e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8\": rpc error: code = NotFound desc = could not find container \"e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8\": container with ID starting with e7a9bf969928656016f36a4c50d7df9f40b057f16af31a24efef064a1eb877e8 not found: ID does not exist" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.992892 4900 scope.go:117] "RemoveContainer" containerID="693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3" Jan 27 13:21:58 crc kubenswrapper[4900]: E0127 13:21:58.993330 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3\": container with ID starting with 693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3 not found: ID does not exist" containerID="693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.993358 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3"} err="failed to get container status \"693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3\": rpc error: code = NotFound desc = could not find container \"693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3\": container with ID starting with 693b78962f065114b78e7768c6202d8d04531a5ea4cafe1c1bbcf83e01efe3e3 not found: ID does not exist" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.993372 4900 scope.go:117] "RemoveContainer" 
containerID="6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90" Jan 27 13:21:58 crc kubenswrapper[4900]: E0127 13:21:58.993683 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90\": container with ID starting with 6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90 not found: ID does not exist" containerID="6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90" Jan 27 13:21:58 crc kubenswrapper[4900]: I0127 13:21:58.993711 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90"} err="failed to get container status \"6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90\": rpc error: code = NotFound desc = could not find container \"6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90\": container with ID starting with 6e17b42b1f6fbb4b0518a96dd25690af485393e79446c4d37b557a7cc0a58a90 not found: ID does not exist" Jan 27 13:22:00 crc kubenswrapper[4900]: I0127 13:22:00.513425 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" path="/var/lib/kubelet/pods/7f197dba-63db-49f4-a9c8-877c4acbeabc/volumes" Jan 27 13:22:10 crc kubenswrapper[4900]: I0127 13:22:10.026041 4900 generic.go:334] "Generic (PLEG): container finished" podID="e20161f2-b4d0-4e63-b7b7-7c359fef99a0" containerID="6ac90d5593c2cc503065122e0e994ed6a2d05715f59fa35688ef2d8996dfa122" exitCode=0 Jan 27 13:22:10 crc kubenswrapper[4900]: I0127 13:22:10.026165 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" event={"ID":"e20161f2-b4d0-4e63-b7b7-7c359fef99a0","Type":"ContainerDied","Data":"6ac90d5593c2cc503065122e0e994ed6a2d05715f59fa35688ef2d8996dfa122"} Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.618456 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.652504 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-telemetry-power-monitoring-combined-ca-bundle\") pod \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.652701 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-2\") pod \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.652745 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-inventory\") pod \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.652786 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-0\") pod \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.652846 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ssh-key-openstack-edpm-ipam\") pod \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.652937 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-1\") pod \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.652984 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76bfn\" (UniqueName: \"kubernetes.io/projected/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-kube-api-access-76bfn\") pod \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\" (UID: \"e20161f2-b4d0-4e63-b7b7-7c359fef99a0\") " Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.661136 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "e20161f2-b4d0-4e63-b7b7-7c359fef99a0" (UID: "e20161f2-b4d0-4e63-b7b7-7c359fef99a0"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.671873 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-kube-api-access-76bfn" (OuterVolumeSpecName: "kube-api-access-76bfn") pod "e20161f2-b4d0-4e63-b7b7-7c359fef99a0" (UID: "e20161f2-b4d0-4e63-b7b7-7c359fef99a0"). InnerVolumeSpecName "kube-api-access-76bfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.703297 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "e20161f2-b4d0-4e63-b7b7-7c359fef99a0" (UID: "e20161f2-b4d0-4e63-b7b7-7c359fef99a0"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.714381 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "e20161f2-b4d0-4e63-b7b7-7c359fef99a0" (UID: "e20161f2-b4d0-4e63-b7b7-7c359fef99a0"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.715971 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e20161f2-b4d0-4e63-b7b7-7c359fef99a0" (UID: "e20161f2-b4d0-4e63-b7b7-7c359fef99a0"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.731667 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-inventory" (OuterVolumeSpecName: "inventory") pod "e20161f2-b4d0-4e63-b7b7-7c359fef99a0" (UID: "e20161f2-b4d0-4e63-b7b7-7c359fef99a0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.735376 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "e20161f2-b4d0-4e63-b7b7-7c359fef99a0" (UID: "e20161f2-b4d0-4e63-b7b7-7c359fef99a0"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.757497 4900 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.757553 4900 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.757571 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.757582 4900 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.757593 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.757605 4900 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:11 crc kubenswrapper[4900]: I0127 13:22:11.757615 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76bfn\" (UniqueName: \"kubernetes.io/projected/e20161f2-b4d0-4e63-b7b7-7c359fef99a0-kube-api-access-76bfn\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.063215 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" event={"ID":"e20161f2-b4d0-4e63-b7b7-7c359fef99a0","Type":"ContainerDied","Data":"6eb123e4d4ed9a0619ebc3353be8f393858b61decdead20735083d8ffcdbb8f5"} Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.063310 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6eb123e4d4ed9a0619ebc3353be8f393858b61decdead20735083d8ffcdbb8f5" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.063455 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.224084 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw"] Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225009 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225038 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225090 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="extract-content" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225102 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="extract-content" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225140 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerName="extract-utilities" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225151 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerName="extract-utilities" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225173 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerName="extract-content" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225180 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerName="extract-content" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225196 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225203 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225215 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="extract-utilities" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225224 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="extract-utilities" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225246 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerName="extract-content" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225255 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerName="extract-content" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225267 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e20161f2-b4d0-4e63-b7b7-7c359fef99a0" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225277 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e20161f2-b4d0-4e63-b7b7-7c359fef99a0" 
containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225306 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225314 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: E0127 13:22:12.225334 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerName="extract-utilities" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225342 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerName="extract-utilities" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225654 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f197dba-63db-49f4-a9c8-877c4acbeabc" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225685 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e851c54-9261-4aec-88b9-a0beb014d8cd" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225717 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="e20161f2-b4d0-4e63-b7b7-7c359fef99a0" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.225737 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="30d76435-7dcd-4fad-b4f3-ac2b562d5649" containerName="registry-server" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.227130 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.230574 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.231381 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c4qrj" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.231677 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.231850 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.232045 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.253226 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw"] Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.275555 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.276547 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.276693 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9sm7\" (UniqueName: \"kubernetes.io/projected/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-kube-api-access-b9sm7\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.276896 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.276954 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 
13:22:12.379474 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9sm7\" (UniqueName: \"kubernetes.io/projected/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-kube-api-access-b9sm7\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.379687 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.379730 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.379810 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.379970 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.384113 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.385558 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.385857 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:12 crc 
kubenswrapper[4900]: I0127 13:22:12.388100 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw"
Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.405998 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9sm7\" (UniqueName: \"kubernetes.io/projected/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-kube-api-access-b9sm7\") pod \"logging-edpm-deployment-openstack-edpm-ipam-fhqzw\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw"
Jan 27 13:22:12 crc kubenswrapper[4900]: I0127 13:22:12.554771 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw"
Jan 27 13:22:13 crc kubenswrapper[4900]: I0127 13:22:13.187597 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw"]
Jan 27 13:22:13 crc kubenswrapper[4900]: I0127 13:22:13.202047 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 13:22:14 crc kubenswrapper[4900]: I0127 13:22:14.102160 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" event={"ID":"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd","Type":"ContainerStarted","Data":"e9469b6f24127bd9803ac48fa312b4a2661f5ce083ef6a4f03c767b0e7849388"}
Jan 27 13:22:15 crc kubenswrapper[4900]: I0127 13:22:15.123493 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" event={"ID":"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd","Type":"ContainerStarted","Data":"dae47cd3afee2f3bfd03fd0f312781ae0848f60cb303d10750038ed032b94a57"}
Jan 27 13:22:15 crc kubenswrapper[4900]: I0127 13:22:15.156564 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" podStartSLOduration=2.570290881 podStartE2EDuration="3.156532763s" podCreationTimestamp="2026-01-27 13:22:12 +0000 UTC" firstStartedPulling="2026-01-27 13:22:13.20175997 +0000 UTC m=+3360.438788180" lastFinishedPulling="2026-01-27 13:22:13.788001862 +0000 UTC m=+3361.025030062" observedRunningTime="2026-01-27 13:22:15.141103124 +0000 UTC m=+3362.378131354" watchObservedRunningTime="2026-01-27 13:22:15.156532763 +0000 UTC m=+3362.393560973"
Jan 27 13:22:30 crc kubenswrapper[4900]: I0127 13:22:30.321438 4900 generic.go:334] "Generic (PLEG): container finished" podID="e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" containerID="dae47cd3afee2f3bfd03fd0f312781ae0848f60cb303d10750038ed032b94a57" exitCode=0
Jan 27 13:22:30 crc kubenswrapper[4900]: I0127 13:22:30.321613 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" event={"ID":"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd","Type":"ContainerDied","Data":"dae47cd3afee2f3bfd03fd0f312781ae0848f60cb303d10750038ed032b94a57"}
Jan 27 13:22:31 crc kubenswrapper[4900]: I0127 13:22:31.901478 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw"
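The pod_startup_latency_tracker entry above packs several timestamps into one record, and the arithmetic is recoverable from them: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that end-to-end time minus the image-pull window between firstStartedPulling and lastFinishedPulling. A sketch that re-derives the numbers in Go, assuming only the values printed above; the " m=+…" monotonic suffix must be stripped because time.Parse does not accept it:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// parseKubeTime parses timestamps in the form the kubelet prints, e.g.
// "2026-01-27 13:22:13.788001862 +0000 UTC m=+3361.025030062",
// after cutting off the monotonic-clock suffix.
func parseKubeTime(s string) (time.Time, error) {
	if i := strings.Index(s, " m=+"); i >= 0 {
		s = s[:i]
	}
	return time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
}

func main() {
	created, _ := parseKubeTime("2026-01-27 13:22:12 +0000 UTC")
	pullStart, _ := parseKubeTime("2026-01-27 13:22:13.20175997 +0000 UTC m=+3360.438788180")
	pullEnd, _ := parseKubeTime("2026-01-27 13:22:13.788001862 +0000 UTC m=+3361.025030062")
	running, _ := parseKubeTime("2026-01-27 13:22:15.156532763 +0000 UTC m=+3362.393560973")

	e2e := running.Sub(created)         // podStartE2EDuration: 3.156532763s
	slo := e2e - pullEnd.Sub(pullStart) // startup time excluding image pull
	fmt.Println(e2e, slo)
}
```

The e2e value reproduces the logged "3.156532763s" exactly; the SLO value lands within tens of nanoseconds of the logged 2.570290881, since the kubelet subtracts using the monotonic readings, which parsed wall-clock values cannot reproduce bit-for-bit.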
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.020670 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-0\") pod \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.020906 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-ssh-key-openstack-edpm-ipam\") pod \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.021311 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-inventory\") pod \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.021415 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-1\") pod \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.021525 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9sm7\" (UniqueName: \"kubernetes.io/projected/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-kube-api-access-b9sm7\") pod \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\" (UID: \"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd\") " Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.032247 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-kube-api-access-b9sm7" (OuterVolumeSpecName: "kube-api-access-b9sm7") pod "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" (UID: "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd"). InnerVolumeSpecName "kube-api-access-b9sm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.067243 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" (UID: "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.067746 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-inventory" (OuterVolumeSpecName: "inventory") pod "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" (UID: "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.085398 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" (UID: "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.096873 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" (UID: "e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd"). InnerVolumeSpecName "logging-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.126178 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.126242 4900 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-inventory\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.126261 4900 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.126279 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9sm7\" (UniqueName: \"kubernetes.io/projected/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-kube-api-access-b9sm7\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.126298 4900 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.350665 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" event={"ID":"e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd","Type":"ContainerDied","Data":"e9469b6f24127bd9803ac48fa312b4a2661f5ce083ef6a4f03c767b0e7849388"} Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.350748 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9469b6f24127bd9803ac48fa312b4a2661f5ce083ef6a4f03c767b0e7849388" Jan 27 13:22:32 crc kubenswrapper[4900]: I0127 13:22:32.350872 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-fhqzw" Jan 27 13:22:52 crc kubenswrapper[4900]: I0127 13:22:52.373568 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:22:52 crc kubenswrapper[4900]: I0127 13:22:52.374845 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:23:22 crc kubenswrapper[4900]: I0127 13:23:22.373371 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:23:22 crc kubenswrapper[4900]: I0127 13:23:22.374389 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:23:52 crc kubenswrapper[4900]: I0127 13:23:52.372903 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:23:52 crc kubenswrapper[4900]: I0127 13:23:52.375417 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:23:52 crc kubenswrapper[4900]: I0127 13:23:52.375688 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:23:52 crc kubenswrapper[4900]: I0127 13:23:52.377438 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c597ab2a68a79b51e2bb511022ae0fb5a8221544d64818accf88cd1d6f181d1f"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:23:52 crc kubenswrapper[4900]: I0127 13:23:52.377744 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://c597ab2a68a79b51e2bb511022ae0fb5a8221544d64818accf88cd1d6f181d1f" gracePeriod=600 Jan 27 13:23:52 crc kubenswrapper[4900]: I0127 13:23:52.722439 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" 
containerID="c597ab2a68a79b51e2bb511022ae0fb5a8221544d64818accf88cd1d6f181d1f" exitCode=0 Jan 27 13:23:52 crc kubenswrapper[4900]: I0127 13:23:52.722519 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"c597ab2a68a79b51e2bb511022ae0fb5a8221544d64818accf88cd1d6f181d1f"} Jan 27 13:23:52 crc kubenswrapper[4900]: I0127 13:23:52.723113 4900 scope.go:117] "RemoveContainer" containerID="5bc7c9a9a97dfc0efdf0101dc30a48bf2ede1c8787a5c7205e0bcc904b528579" Jan 27 13:23:53 crc kubenswrapper[4900]: I0127 13:23:53.746841 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"} Jan 27 13:25:52 crc kubenswrapper[4900]: I0127 13:25:52.372284 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:25:52 crc kubenswrapper[4900]: I0127 13:25:52.373126 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:26:22 crc kubenswrapper[4900]: I0127 13:26:22.372581 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:26:22 crc kubenswrapper[4900]: I0127 13:26:22.373189 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.373242 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.374136 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.374224 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.376141 4900 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.376260 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" gracePeriod=600 Jan 27 13:26:52 crc kubenswrapper[4900]: E0127 13:26:52.578890 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.970706 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" exitCode=0 Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.970764 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"} Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.970834 4900 scope.go:117] "RemoveContainer" containerID="c597ab2a68a79b51e2bb511022ae0fb5a8221544d64818accf88cd1d6f181d1f" Jan 27 13:26:52 crc kubenswrapper[4900]: I0127 13:26:52.972091 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:26:52 crc kubenswrapper[4900]: E0127 13:26:52.972611 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:27:04 crc kubenswrapper[4900]: I0127 13:27:04.487919 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:27:04 crc kubenswrapper[4900]: E0127 13:27:04.488905 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:27:19 crc kubenswrapper[4900]: I0127 13:27:19.482694 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:27:19 crc kubenswrapper[4900]: E0127 13:27:19.483757 4900 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:27:34 crc kubenswrapper[4900]: I0127 13:27:34.483420 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:27:34 crc kubenswrapper[4900]: E0127 13:27:34.484482 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:27:45 crc kubenswrapper[4900]: I0127 13:27:45.483716 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:27:45 crc kubenswrapper[4900]: E0127 13:27:45.484863 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:27:59 crc kubenswrapper[4900]: I0127 13:27:59.482708 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:27:59 crc kubenswrapper[4900]: E0127 13:27:59.483657 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:28:05 crc kubenswrapper[4900]: E0127 13:28:05.568072 4900 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.220:37742->38.102.83.220:38021: read tcp 38.102.83.220:37742->38.102.83.220:38021: read: connection reset by peer Jan 27 13:28:05 crc kubenswrapper[4900]: E0127 13:28:05.568803 4900 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.220:37742->38.102.83.220:38021: write tcp 38.102.83.220:37742->38.102.83.220:38021: write: broken pipe Jan 27 13:28:10 crc kubenswrapper[4900]: I0127 13:28:10.482127 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:28:10 crc kubenswrapper[4900]: E0127 13:28:10.483141 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:28:24 crc kubenswrapper[4900]: I0127 13:28:24.487006 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:28:24 crc kubenswrapper[4900]: E0127 13:28:24.490007 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:28:36 crc kubenswrapper[4900]: I0127 13:28:36.493913 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:28:36 crc kubenswrapper[4900]: E0127 13:28:36.494998 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:28:51 crc kubenswrapper[4900]: I0127 13:28:51.483437 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:28:51 crc kubenswrapper[4900]: E0127 13:28:51.484401 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:29:04 crc kubenswrapper[4900]: I0127 13:29:04.482567 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:29:04 crc kubenswrapper[4900]: E0127 13:29:04.483468 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:29:19 crc kubenswrapper[4900]: I0127 13:29:19.483049 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:29:19 crc kubenswrapper[4900]: E0127 13:29:19.484157 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:29:32 crc kubenswrapper[4900]: I0127 13:29:32.482891 4900 
scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:29:32 crc kubenswrapper[4900]: E0127 13:29:32.484155 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:29:46 crc kubenswrapper[4900]: I0127 13:29:46.492070 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:29:46 crc kubenswrapper[4900]: E0127 13:29:46.494917 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.188594 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v"] Jan 27 13:30:00 crc kubenswrapper[4900]: E0127 13:30:00.190486 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" containerName="logging-edpm-deployment-openstack-edpm-ipam" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.190513 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" containerName="logging-edpm-deployment-openstack-edpm-ipam" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.190878 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd" containerName="logging-edpm-deployment-openstack-edpm-ipam" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.192207 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.197488 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.197508 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.206179 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v"] Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.226416 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-secret-volume\") pod \"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.226573 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4txx4\" (UniqueName: \"kubernetes.io/projected/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-kube-api-access-4txx4\") pod \"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.226692 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-config-volume\") pod \"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.329530 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-secret-volume\") pod \"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.329739 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4txx4\" (UniqueName: \"kubernetes.io/projected/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-kube-api-access-4txx4\") pod \"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.329893 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-config-volume\") pod \"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.331629 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-config-volume\") pod 
\"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.345134 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-secret-volume\") pod \"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.351071 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4txx4\" (UniqueName: \"kubernetes.io/projected/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-kube-api-access-4txx4\") pod \"collect-profiles-29492010-rv66v\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:00 crc kubenswrapper[4900]: I0127 13:30:00.516970 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:01 crc kubenswrapper[4900]: I0127 13:30:01.113482 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v"] Jan 27 13:30:01 crc kubenswrapper[4900]: I0127 13:30:01.199585 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" event={"ID":"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7","Type":"ContainerStarted","Data":"2327c381d7fcb76cc81ec078e28d791a90afb3a7a79cc41b113b49dfc32663ff"} Jan 27 13:30:01 crc kubenswrapper[4900]: I0127 13:30:01.481691 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:30:01 crc kubenswrapper[4900]: E0127 13:30:01.482465 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:30:02 crc kubenswrapper[4900]: I0127 13:30:02.273676 4900 generic.go:334] "Generic (PLEG): container finished" podID="2730bd0a-eabd-4ea7-af71-6f1fccaf1df7" containerID="05e8eaad578f017059ac01ec72bd8e8fffe2a7a65db0157ee25669d2f2f9d3ab" exitCode=0 Jan 27 13:30:02 crc kubenswrapper[4900]: I0127 13:30:02.273855 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" event={"ID":"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7","Type":"ContainerDied","Data":"05e8eaad578f017059ac01ec72bd8e8fffe2a7a65db0157ee25669d2f2f9d3ab"} Jan 27 13:30:03 crc kubenswrapper[4900]: I0127 13:30:03.829050 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:03 crc kubenswrapper[4900]: I0127 13:30:03.982586 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-secret-volume\") pod \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " Jan 27 13:30:03 crc kubenswrapper[4900]: I0127 13:30:03.982855 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-config-volume\") pod \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " Jan 27 13:30:03 crc kubenswrapper[4900]: I0127 13:30:03.984546 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-config-volume" (OuterVolumeSpecName: "config-volume") pod "2730bd0a-eabd-4ea7-af71-6f1fccaf1df7" (UID: "2730bd0a-eabd-4ea7-af71-6f1fccaf1df7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 13:30:03 crc kubenswrapper[4900]: I0127 13:30:03.984877 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4txx4\" (UniqueName: \"kubernetes.io/projected/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-kube-api-access-4txx4\") pod \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\" (UID: \"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7\") " Jan 27 13:30:03 crc kubenswrapper[4900]: I0127 13:30:03.987569 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 13:30:03 crc kubenswrapper[4900]: I0127 13:30:03.991686 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-kube-api-access-4txx4" (OuterVolumeSpecName: "kube-api-access-4txx4") pod "2730bd0a-eabd-4ea7-af71-6f1fccaf1df7" (UID: "2730bd0a-eabd-4ea7-af71-6f1fccaf1df7"). InnerVolumeSpecName "kube-api-access-4txx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:30:04 crc kubenswrapper[4900]: I0127 13:30:04.004699 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2730bd0a-eabd-4ea7-af71-6f1fccaf1df7" (UID: "2730bd0a-eabd-4ea7-af71-6f1fccaf1df7"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:30:04 crc kubenswrapper[4900]: I0127 13:30:04.093857 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4txx4\" (UniqueName: \"kubernetes.io/projected/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-kube-api-access-4txx4\") on node \"crc\" DevicePath \"\"" Jan 27 13:30:04 crc kubenswrapper[4900]: I0127 13:30:04.093908 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 13:30:04 crc kubenswrapper[4900]: I0127 13:30:04.306270 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" event={"ID":"2730bd0a-eabd-4ea7-af71-6f1fccaf1df7","Type":"ContainerDied","Data":"2327c381d7fcb76cc81ec078e28d791a90afb3a7a79cc41b113b49dfc32663ff"} Jan 27 13:30:04 crc kubenswrapper[4900]: I0127 13:30:04.306791 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2327c381d7fcb76cc81ec078e28d791a90afb3a7a79cc41b113b49dfc32663ff" Jan 27 13:30:04 crc kubenswrapper[4900]: I0127 13:30:04.306351 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v" Jan 27 13:30:04 crc kubenswrapper[4900]: I0127 13:30:04.940895 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4"] Jan 27 13:30:04 crc kubenswrapper[4900]: I0127 13:30:04.957934 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491965-xptv4"] Jan 27 13:30:06 crc kubenswrapper[4900]: I0127 13:30:06.502024 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f207e2d8-e4bc-4953-881f-730cff507dac" path="/var/lib/kubelet/pods/f207e2d8-e4bc-4953-881f-730cff507dac/volumes" Jan 27 13:30:12 crc kubenswrapper[4900]: I0127 13:30:12.482143 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:30:12 crc kubenswrapper[4900]: E0127 13:30:12.483455 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:30:23 crc kubenswrapper[4900]: I0127 13:30:23.482296 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:30:23 crc kubenswrapper[4900]: E0127 13:30:23.483426 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.498029 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9p8nl"] Jan 27 13:30:24 crc 
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.499041 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="2730bd0a-eabd-4ea7-af71-6f1fccaf1df7" containerName="collect-profiles"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.499328 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="2730bd0a-eabd-4ea7-af71-6f1fccaf1df7" containerName="collect-profiles"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.501345 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.504141 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9p8nl"]
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.690667 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-utilities\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.690728 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq5xg\" (UniqueName: \"kubernetes.io/projected/6ed6ad7b-a315-4755-9915-7c754ceb13cf-kube-api-access-xq5xg\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.690774 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-catalog-content\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.794314 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-utilities\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.794822 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq5xg\" (UniqueName: \"kubernetes.io/projected/6ed6ad7b-a315-4755-9915-7c754ceb13cf-kube-api-access-xq5xg\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.794967 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-catalog-content\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.794991 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-utilities\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.796107 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-catalog-content\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:24 crc kubenswrapper[4900]: I0127 13:30:24.849126 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq5xg\" (UniqueName: \"kubernetes.io/projected/6ed6ad7b-a315-4755-9915-7c754ceb13cf-kube-api-access-xq5xg\") pod \"redhat-operators-9p8nl\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") " pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:25 crc kubenswrapper[4900]: I0127 13:30:25.131634 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:25 crc kubenswrapper[4900]: I0127 13:30:25.768593 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9p8nl"]
Jan 27 13:30:25 crc kubenswrapper[4900]: W0127 13:30:25.803660 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ed6ad7b_a315_4755_9915_7c754ceb13cf.slice/crio-b24bd0d1edc4819c78938e51575ade3fb29a25a34df47051c8a9ae4c5a9f409a WatchSource:0}: Error finding container b24bd0d1edc4819c78938e51575ade3fb29a25a34df47051c8a9ae4c5a9f409a: Status 404 returned error can't find the container with id b24bd0d1edc4819c78938e51575ade3fb29a25a34df47051c8a9ae4c5a9f409a
Jan 27 13:30:26 crc kubenswrapper[4900]: I0127 13:30:26.654644 4900 generic.go:334] "Generic (PLEG): container finished" podID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerID="d282f26ef3c6cf195fcf605a31ff86b403bcd945b5704b1fb7f66eefbb28c889" exitCode=0
Jan 27 13:30:26 crc kubenswrapper[4900]: I0127 13:30:26.654787 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9p8nl" event={"ID":"6ed6ad7b-a315-4755-9915-7c754ceb13cf","Type":"ContainerDied","Data":"d282f26ef3c6cf195fcf605a31ff86b403bcd945b5704b1fb7f66eefbb28c889"}
Jan 27 13:30:26 crc kubenswrapper[4900]: I0127 13:30:26.655557 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9p8nl" event={"ID":"6ed6ad7b-a315-4755-9915-7c754ceb13cf","Type":"ContainerStarted","Data":"b24bd0d1edc4819c78938e51575ade3fb29a25a34df47051c8a9ae4c5a9f409a"}
Jan 27 13:30:26 crc kubenswrapper[4900]: I0127 13:30:26.676369 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 27 13:30:29 crc kubenswrapper[4900]: I0127 13:30:29.714601 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9p8nl" event={"ID":"6ed6ad7b-a315-4755-9915-7c754ceb13cf","Type":"ContainerStarted","Data":"c5932be93d1045b3fedbbbdb0f21b6f4b1267924b09a274bd054b6084272eee9"}
Jan 27 13:30:36 crc kubenswrapper[4900]: I0127 13:30:36.491934 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"
Jan 27 13:30:36 crc kubenswrapper[4900]: E0127 13:30:36.494488 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:30:41 crc kubenswrapper[4900]: I0127 13:30:41.464237 4900 scope.go:117] "RemoveContainer" containerID="acca4490f01236a385baf7895d9a3611d6836e79edebf08b5adef8752fbbc6f7"
Jan 27 13:30:43 crc kubenswrapper[4900]: I0127 13:30:43.935523 4900 generic.go:334] "Generic (PLEG): container finished" podID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerID="c5932be93d1045b3fedbbbdb0f21b6f4b1267924b09a274bd054b6084272eee9" exitCode=0
Jan 27 13:30:43 crc kubenswrapper[4900]: I0127 13:30:43.935610 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9p8nl" event={"ID":"6ed6ad7b-a315-4755-9915-7c754ceb13cf","Type":"ContainerDied","Data":"c5932be93d1045b3fedbbbdb0f21b6f4b1267924b09a274bd054b6084272eee9"}
Jan 27 13:30:45 crc kubenswrapper[4900]: I0127 13:30:45.976844 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9p8nl" event={"ID":"6ed6ad7b-a315-4755-9915-7c754ceb13cf","Type":"ContainerStarted","Data":"3b9f4aacf57868ac0ac6621339d740cbc54fab05810e39d31e5eec235bb12935"}
Jan 27 13:30:46 crc kubenswrapper[4900]: I0127 13:30:46.000452 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9p8nl" podStartSLOduration=4.024267969 podStartE2EDuration="22.000433516s" podCreationTimestamp="2026-01-27 13:30:24 +0000 UTC" firstStartedPulling="2026-01-27 13:30:26.675833334 +0000 UTC m=+3853.912861544" lastFinishedPulling="2026-01-27 13:30:44.651998881 +0000 UTC m=+3871.889027091" observedRunningTime="2026-01-27 13:30:45.998711997 +0000 UTC m=+3873.235740197" watchObservedRunningTime="2026-01-27 13:30:46.000433516 +0000 UTC m=+3873.237461726"
Jan 27 13:30:51 crc kubenswrapper[4900]: I0127 13:30:51.483430 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"
Jan 27 13:30:51 crc kubenswrapper[4900]: E0127 13:30:51.484805 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:30:55 crc kubenswrapper[4900]: I0127 13:30:55.132302 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:55 crc kubenswrapper[4900]: I0127 13:30:55.132989 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:55 crc kubenswrapper[4900]: I0127 13:30:55.215177 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:56 crc kubenswrapper[4900]: I0127 13:30:56.172298 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:56 crc kubenswrapper[4900]: I0127 13:30:56.255910 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9p8nl"]
Jan 27 13:30:58 crc kubenswrapper[4900]: I0127 13:30:58.143239 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9p8nl" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerName="registry-server" containerID="cri-o://3b9f4aacf57868ac0ac6621339d740cbc54fab05810e39d31e5eec235bb12935" gracePeriod=2
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.164099 4900 generic.go:334] "Generic (PLEG): container finished" podID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerID="3b9f4aacf57868ac0ac6621339d740cbc54fab05810e39d31e5eec235bb12935" exitCode=0
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.164487 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9p8nl" event={"ID":"6ed6ad7b-a315-4755-9915-7c754ceb13cf","Type":"ContainerDied","Data":"3b9f4aacf57868ac0ac6621339d740cbc54fab05810e39d31e5eec235bb12935"}
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.534659 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.668912 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-catalog-content\") pod \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") "
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.669521 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq5xg\" (UniqueName: \"kubernetes.io/projected/6ed6ad7b-a315-4755-9915-7c754ceb13cf-kube-api-access-xq5xg\") pod \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") "
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.669729 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-utilities\") pod \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\" (UID: \"6ed6ad7b-a315-4755-9915-7c754ceb13cf\") "
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.671319 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-utilities" (OuterVolumeSpecName: "utilities") pod "6ed6ad7b-a315-4755-9915-7c754ceb13cf" (UID: "6ed6ad7b-a315-4755-9915-7c754ceb13cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.672431 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.707149 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ed6ad7b-a315-4755-9915-7c754ceb13cf-kube-api-access-xq5xg" (OuterVolumeSpecName: "kube-api-access-xq5xg") pod "6ed6ad7b-a315-4755-9915-7c754ceb13cf" (UID: "6ed6ad7b-a315-4755-9915-7c754ceb13cf"). InnerVolumeSpecName "kube-api-access-xq5xg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.778974 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq5xg\" (UniqueName: \"kubernetes.io/projected/6ed6ad7b-a315-4755-9915-7c754ceb13cf-kube-api-access-xq5xg\") on node \"crc\" DevicePath \"\""
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.879936 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6ed6ad7b-a315-4755-9915-7c754ceb13cf" (UID: "6ed6ad7b-a315-4755-9915-7c754ceb13cf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 13:30:59 crc kubenswrapper[4900]: I0127 13:30:59.891170 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ed6ad7b-a315-4755-9915-7c754ceb13cf-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 27 13:31:00 crc kubenswrapper[4900]: I0127 13:31:00.192233 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9p8nl" event={"ID":"6ed6ad7b-a315-4755-9915-7c754ceb13cf","Type":"ContainerDied","Data":"b24bd0d1edc4819c78938e51575ade3fb29a25a34df47051c8a9ae4c5a9f409a"}
Jan 27 13:31:00 crc kubenswrapper[4900]: I0127 13:31:00.192306 4900 scope.go:117] "RemoveContainer" containerID="3b9f4aacf57868ac0ac6621339d740cbc54fab05810e39d31e5eec235bb12935"
Jan 27 13:31:00 crc kubenswrapper[4900]: I0127 13:31:00.192521 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9p8nl"
Jan 27 13:31:00 crc kubenswrapper[4900]: I0127 13:31:00.233523 4900 scope.go:117] "RemoveContainer" containerID="c5932be93d1045b3fedbbbdb0f21b6f4b1267924b09a274bd054b6084272eee9"
Jan 27 13:31:00 crc kubenswrapper[4900]: I0127 13:31:00.246000 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9p8nl"]
Jan 27 13:31:00 crc kubenswrapper[4900]: I0127 13:31:00.264705 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9p8nl"]
Jan 27 13:31:00 crc kubenswrapper[4900]: I0127 13:31:00.337729 4900 scope.go:117] "RemoveContainer" containerID="d282f26ef3c6cf195fcf605a31ff86b403bcd945b5704b1fb7f66eefbb28c889"
Jan 27 13:31:00 crc kubenswrapper[4900]: I0127 13:31:00.505967 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" path="/var/lib/kubelet/pods/6ed6ad7b-a315-4755-9915-7c754ceb13cf/volumes"
Jan 27 13:31:04 crc kubenswrapper[4900]: I0127 13:31:04.483567 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"
Jan 27 13:31:04 crc kubenswrapper[4900]: E0127 13:31:04.484722 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:31:18 crc kubenswrapper[4900]: I0127 13:31:18.483742 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"
Jan 27 13:31:18 crc kubenswrapper[4900]: E0127 13:31:18.484954 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:31:31 crc kubenswrapper[4900]: I0127 13:31:31.483496 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"
Jan 27 13:31:31 crc kubenswrapper[4900]: E0127 13:31:31.484924 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:31:45 crc kubenswrapper[4900]: I0127 13:31:45.482890 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"
Jan 27 13:31:45 crc kubenswrapper[4900]: E0127 13:31:45.484403 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:31:58 crc kubenswrapper[4900]: I0127 13:31:58.485707 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11"
Jan 27 13:31:59 crc kubenswrapper[4900]: I0127 13:31:59.136335 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"5ce944e0655d1f982b63e8219b9d6bf0e4b923dfb5eb0c1a7ce20dd9d608fd8f"}
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.038750 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dkmsz"]
Jan 27 13:32:31 crc kubenswrapper[4900]: E0127 13:32:31.042048 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerName="extract-content"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.042095 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerName="extract-content"
Jan 27 13:32:31 crc kubenswrapper[4900]: E0127 13:32:31.042137 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerName="extract-utilities"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.042145 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerName="extract-utilities"
Jan 27 13:32:31 crc kubenswrapper[4900]: E0127 13:32:31.042155 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerName="registry-server"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.042163 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerName="registry-server"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.042559 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed6ad7b-a315-4755-9915-7c754ceb13cf" containerName="registry-server"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.047035 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dkmsz"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.049711 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhl6z\" (UniqueName: \"kubernetes.io/projected/d6060c2f-7323-4d7a-9278-500fae84459b-kube-api-access-nhl6z\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.049973 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6060c2f-7323-4d7a-9278-500fae84459b-utilities\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.050081 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6060c2f-7323-4d7a-9278-500fae84459b-catalog-content\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.087656 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dkmsz"]
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.153328 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhl6z\" (UniqueName: \"kubernetes.io/projected/d6060c2f-7323-4d7a-9278-500fae84459b-kube-api-access-nhl6z\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.153496 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6060c2f-7323-4d7a-9278-500fae84459b-utilities\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.153546 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6060c2f-7323-4d7a-9278-500fae84459b-catalog-content\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz"
Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.154260 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6060c2f-7323-4d7a-9278-500fae84459b-utilities\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz"
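Two things worth noting in the run above. First, machine-config-daemon finally restarts at 13:31:59 (ContainerStarted 5ce944e0...), roughly five minutes after it was killed at 13:26:52, which matches the 5m0s backoff ceiling expiring. Second, before admitting certified-operators-dkmsz, the CPU and memory managers sweep out per-container state left over from the long-deleted redhat-operators pod; the E-level "RemoveStaleState" lines are how that cleanup is logged, not failures of the new pod. A sketch of such a sweep, with a hypothetical state map standing in for the managers' checkpoints:

    package main

    import "fmt"

    type key struct{ podUID, container string }

    // removeStaleState drops assignments for containers no longer in the
    // active set, mirroring the cpu_manager.go:410 / state_mem.go:107 pairs
    // above (a sketch of the idea, not kubelet's actual data structures).
    func removeStaleState(assignments map[key]string, active map[key]bool) {
        for k := range assignments {
            if !active[k] {
                fmt.Printf("RemoveStaleState: removing container %q of pod %q\n", k.container, k.podUID)
                delete(assignments, k)
            }
        }
    }

    func main() {
        assignments := map[key]string{
            {"6ed6ad7b-a315-4755-9915-7c754ceb13cf", "registry-server"}: "0-3",
        }
        removeStaleState(assignments, map[key]bool{}) // the pod was deleted earlier in the log
        fmt.Println("remaining assignments:", len(assignments))
    }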
pod="openshift-marketplace/certified-operators-dkmsz" Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.154345 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6060c2f-7323-4d7a-9278-500fae84459b-catalog-content\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz" Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.181247 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhl6z\" (UniqueName: \"kubernetes.io/projected/d6060c2f-7323-4d7a-9278-500fae84459b-kube-api-access-nhl6z\") pod \"certified-operators-dkmsz\" (UID: \"d6060c2f-7323-4d7a-9278-500fae84459b\") " pod="openshift-marketplace/certified-operators-dkmsz" Jan 27 13:32:31 crc kubenswrapper[4900]: I0127 13:32:31.397438 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dkmsz" Jan 27 13:32:32 crc kubenswrapper[4900]: I0127 13:32:32.086644 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dkmsz"] Jan 27 13:32:32 crc kubenswrapper[4900]: W0127 13:32:32.092955 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd6060c2f_7323_4d7a_9278_500fae84459b.slice/crio-f2b7ff5284e6c641104ae343cd2899524e560fcebed7c611f899bdf293556b0b WatchSource:0}: Error finding container f2b7ff5284e6c641104ae343cd2899524e560fcebed7c611f899bdf293556b0b: Status 404 returned error can't find the container with id f2b7ff5284e6c641104ae343cd2899524e560fcebed7c611f899bdf293556b0b Jan 27 13:32:32 crc kubenswrapper[4900]: I0127 13:32:32.673408 4900 generic.go:334] "Generic (PLEG): container finished" podID="d6060c2f-7323-4d7a-9278-500fae84459b" containerID="d408b99d186d4e9ec82b08b293cd824ed539a9b190e2ae1b3971f2d180cd7fa4" exitCode=0 Jan 27 13:32:32 crc kubenswrapper[4900]: I0127 13:32:32.673828 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmsz" event={"ID":"d6060c2f-7323-4d7a-9278-500fae84459b","Type":"ContainerDied","Data":"d408b99d186d4e9ec82b08b293cd824ed539a9b190e2ae1b3971f2d180cd7fa4"} Jan 27 13:32:32 crc kubenswrapper[4900]: I0127 13:32:32.674007 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmsz" event={"ID":"d6060c2f-7323-4d7a-9278-500fae84459b","Type":"ContainerStarted","Data":"f2b7ff5284e6c641104ae343cd2899524e560fcebed7c611f899bdf293556b0b"} Jan 27 13:32:37 crc kubenswrapper[4900]: I0127 13:32:37.752042 4900 generic.go:334] "Generic (PLEG): container finished" podID="d6060c2f-7323-4d7a-9278-500fae84459b" containerID="781da09f46cfd701e1e3a03c61778e0083e581e024165714b3ea8536ef48b2a5" exitCode=0 Jan 27 13:32:37 crc kubenswrapper[4900]: I0127 13:32:37.752319 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmsz" event={"ID":"d6060c2f-7323-4d7a-9278-500fae84459b","Type":"ContainerDied","Data":"781da09f46cfd701e1e3a03c61778e0083e581e024165714b3ea8536ef48b2a5"} Jan 27 13:32:38 crc kubenswrapper[4900]: I0127 13:32:38.769518 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmsz" event={"ID":"d6060c2f-7323-4d7a-9278-500fae84459b","Type":"ContainerStarted","Data":"edd21d28004b5a9f9dbd9e6f6d9c3386fc28c9fd7756de60b7fc480d78985f2f"} Jan 
27 13:32:38 crc kubenswrapper[4900]: I0127 13:32:38.806690 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dkmsz" podStartSLOduration=2.074630282 podStartE2EDuration="7.806663064s" podCreationTimestamp="2026-01-27 13:32:31 +0000 UTC" firstStartedPulling="2026-01-27 13:32:32.676138845 +0000 UTC m=+3979.913167055" lastFinishedPulling="2026-01-27 13:32:38.408171607 +0000 UTC m=+3985.645199837" observedRunningTime="2026-01-27 13:32:38.791091786 +0000 UTC m=+3986.028120006" watchObservedRunningTime="2026-01-27 13:32:38.806663064 +0000 UTC m=+3986.043691274" Jan 27 13:32:41 crc kubenswrapper[4900]: I0127 13:32:41.398496 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dkmsz" Jan 27 13:32:41 crc kubenswrapper[4900]: I0127 13:32:41.399871 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dkmsz" Jan 27 13:32:41 crc kubenswrapper[4900]: I0127 13:32:41.465250 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dkmsz" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.664139 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bch54"] Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.675476 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.737401 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bch54"] Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.773814 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-utilities\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.774356 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmwnq\" (UniqueName: \"kubernetes.io/projected/3dc9cd4a-90da-42cf-ab01-df00215c34a1-kube-api-access-zmwnq\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.774432 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-catalog-content\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.879844 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-utilities\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.880094 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmwnq\" (UniqueName: 
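The probe lines for certified-operators-dkmsz show the expected ordering for a fresh pod: readiness is reported as "" (unknown) and startup as "unhealthy" first, then startup flips to "started", and only after that does readiness become "ready" (at 13:32:51 below). This is the documented startup-probe gating: liveness and readiness results do not take effect until the startup probe has succeeded once. A minimal gating sketch (hypothetical types, not kubelet's probe worker):

    package main

    import "fmt"

    type podProbes struct{ started, ready bool }

    // observe applies a probe result with startup-probe gating: readiness
    // results are ignored until the startup probe has succeeded once.
    func (p *podProbes) observe(probe string, success bool) {
        switch probe {
        case "startup":
            if success {
                p.started = true
            }
        case "readiness":
            if p.started {
                p.ready = success
            }
        }
        fmt.Printf("%s=%v -> started=%v ready=%v\n", probe, success, p.started, p.ready)
    }

    func main() {
        var p podProbes
        p.observe("readiness", false) // status="" while not yet started
        p.observe("startup", false)   // status="unhealthy"
        p.observe("startup", true)    // status="started"
        p.observe("readiness", true)  // status="ready"
    }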
\"kubernetes.io/projected/3dc9cd4a-90da-42cf-ab01-df00215c34a1-kube-api-access-zmwnq\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.880138 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-catalog-content\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.881821 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-catalog-content\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.888772 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-utilities\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:49 crc kubenswrapper[4900]: I0127 13:32:49.926148 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmwnq\" (UniqueName: \"kubernetes.io/projected/3dc9cd4a-90da-42cf-ab01-df00215c34a1-kube-api-access-zmwnq\") pod \"community-operators-bch54\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:50 crc kubenswrapper[4900]: I0127 13:32:50.029545 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bch54" Jan 27 13:32:50 crc kubenswrapper[4900]: I0127 13:32:50.695705 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bch54"] Jan 27 13:32:50 crc kubenswrapper[4900]: I0127 13:32:50.933325 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bch54" event={"ID":"3dc9cd4a-90da-42cf-ab01-df00215c34a1","Type":"ContainerStarted","Data":"ae9df9b76c594eb4e63dfce1903d09f0425788f8851452731903aab45cbbd2cf"} Jan 27 13:32:51 crc kubenswrapper[4900]: I0127 13:32:51.570990 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dkmsz" Jan 27 13:32:51 crc kubenswrapper[4900]: I0127 13:32:51.949526 4900 generic.go:334] "Generic (PLEG): container finished" podID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerID="5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47" exitCode=0 Jan 27 13:32:51 crc kubenswrapper[4900]: I0127 13:32:51.949580 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bch54" event={"ID":"3dc9cd4a-90da-42cf-ab01-df00215c34a1","Type":"ContainerDied","Data":"5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47"} Jan 27 13:32:52 crc kubenswrapper[4900]: I0127 13:32:52.964708 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bch54" event={"ID":"3dc9cd4a-90da-42cf-ab01-df00215c34a1","Type":"ContainerStarted","Data":"81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138"} Jan 27 13:32:53 crc kubenswrapper[4900]: I0127 13:32:53.695229 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dkmsz"] Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.032363 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8kh64"] Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.033040 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8kh64" podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerName="registry-server" containerID="cri-o://8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0" gracePeriod=2 Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.787847 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8kh64" Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.880353 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-utilities\") pod \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.880702 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sl4mc\" (UniqueName: \"kubernetes.io/projected/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-kube-api-access-sl4mc\") pod \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.880742 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-catalog-content\") pod \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\" (UID: \"cd975c7e-d2bf-43ac-bc97-e8efa9e00611\") " Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.883568 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-utilities" (OuterVolumeSpecName: "utilities") pod "cd975c7e-d2bf-43ac-bc97-e8efa9e00611" (UID: "cd975c7e-d2bf-43ac-bc97-e8efa9e00611"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.902244 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-kube-api-access-sl4mc" (OuterVolumeSpecName: "kube-api-access-sl4mc") pod "cd975c7e-d2bf-43ac-bc97-e8efa9e00611" (UID: "cd975c7e-d2bf-43ac-bc97-e8efa9e00611"). InnerVolumeSpecName "kube-api-access-sl4mc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.991868 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:32:54 crc kubenswrapper[4900]: I0127 13:32:54.991931 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sl4mc\" (UniqueName: \"kubernetes.io/projected/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-kube-api-access-sl4mc\") on node \"crc\" DevicePath \"\"" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.041384 4900 generic.go:334] "Generic (PLEG): container finished" podID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerID="81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138" exitCode=0 Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.041541 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bch54" event={"ID":"3dc9cd4a-90da-42cf-ab01-df00215c34a1","Type":"ContainerDied","Data":"81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138"} Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.054882 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd975c7e-d2bf-43ac-bc97-e8efa9e00611" (UID: "cd975c7e-d2bf-43ac-bc97-e8efa9e00611"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.075501 4900 generic.go:334] "Generic (PLEG): container finished" podID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerID="8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0" exitCode=0 Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.075940 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8kh64" event={"ID":"cd975c7e-d2bf-43ac-bc97-e8efa9e00611","Type":"ContainerDied","Data":"8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0"} Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.075988 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8kh64" event={"ID":"cd975c7e-d2bf-43ac-bc97-e8efa9e00611","Type":"ContainerDied","Data":"8b62554df23bbd0b4e951555fd98e876e2baeec32e0dc50ba6a57a772dfa5473"} Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.076011 4900 scope.go:117] "RemoveContainer" containerID="8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.076285 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8kh64" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.098811 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd975c7e-d2bf-43ac-bc97-e8efa9e00611-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.139290 4900 scope.go:117] "RemoveContainer" containerID="225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.238329 4900 scope.go:117] "RemoveContainer" containerID="774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.277130 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8kh64"] Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.319301 4900 scope.go:117] "RemoveContainer" containerID="8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0" Jan 27 13:32:55 crc kubenswrapper[4900]: E0127 13:32:55.320523 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0\": container with ID starting with 8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0 not found: ID does not exist" containerID="8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.320603 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0"} err="failed to get container status \"8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0\": rpc error: code = NotFound desc = could not find container \"8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0\": container with ID starting with 8bbc29bf4296417df7742ca79508ba18e81b0d70dce36fd777b5dbbc227dd5b0 not found: ID does not exist" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.320654 4900 scope.go:117] "RemoveContainer" containerID="225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477" Jan 
27 13:32:55 crc kubenswrapper[4900]: E0127 13:32:55.329704 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477\": container with ID starting with 225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477 not found: ID does not exist" containerID="225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.329795 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477"} err="failed to get container status \"225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477\": rpc error: code = NotFound desc = could not find container \"225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477\": container with ID starting with 225137c4a32926a60fc312b242b5acb397148c56b87ea363acafa51c52983477 not found: ID does not exist" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.329852 4900 scope.go:117] "RemoveContainer" containerID="774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4" Jan 27 13:32:55 crc kubenswrapper[4900]: E0127 13:32:55.334241 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4\": container with ID starting with 774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4 not found: ID does not exist" containerID="774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.334345 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4"} err="failed to get container status \"774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4\": rpc error: code = NotFound desc = could not find container \"774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4\": container with ID starting with 774b15fedf47f2461594eebebd4f77e1c8decef24bf39c77266b7a0b10e7fbf4 not found: ID does not exist" Jan 27 13:32:55 crc kubenswrapper[4900]: I0127 13:32:55.341334 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8kh64"] Jan 27 13:32:56 crc kubenswrapper[4900]: I0127 13:32:56.125855 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bch54" event={"ID":"3dc9cd4a-90da-42cf-ab01-df00215c34a1","Type":"ContainerStarted","Data":"c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967"} Jan 27 13:32:56 crc kubenswrapper[4900]: I0127 13:32:56.175489 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bch54" podStartSLOduration=3.362904933 podStartE2EDuration="7.175421941s" podCreationTimestamp="2026-01-27 13:32:49 +0000 UTC" firstStartedPulling="2026-01-27 13:32:51.953021619 +0000 UTC m=+3999.190049829" lastFinishedPulling="2026-01-27 13:32:55.765538627 +0000 UTC m=+4003.002566837" observedRunningTime="2026-01-27 13:32:56.156667172 +0000 UTC m=+4003.393695392" watchObservedRunningTime="2026-01-27 13:32:56.175421941 +0000 UTC m=+4003.412450151" Jan 27 13:32:56 crc kubenswrapper[4900]: I0127 13:32:56.516310 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" path="/var/lib/kubelet/pods/cd975c7e-d2bf-43ac-bc97-e8efa9e00611/volumes" Jan 27 13:33:00 crc kubenswrapper[4900]: I0127 13:33:00.031454 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bch54" Jan 27 13:33:00 crc kubenswrapper[4900]: I0127 13:33:00.032457 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bch54" Jan 27 13:33:00 crc kubenswrapper[4900]: I0127 13:33:00.103894 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bch54" Jan 27 13:33:10 crc kubenswrapper[4900]: I0127 13:33:10.093733 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bch54" Jan 27 13:33:10 crc kubenswrapper[4900]: I0127 13:33:10.185117 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bch54"] Jan 27 13:33:10 crc kubenswrapper[4900]: I0127 13:33:10.331731 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bch54" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerName="registry-server" containerID="cri-o://c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967" gracePeriod=2 Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.025736 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bch54" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.114101 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-catalog-content\") pod \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.114190 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-utilities\") pod \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.114393 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmwnq\" (UniqueName: \"kubernetes.io/projected/3dc9cd4a-90da-42cf-ab01-df00215c34a1-kube-api-access-zmwnq\") pod \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\" (UID: \"3dc9cd4a-90da-42cf-ab01-df00215c34a1\") " Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.115973 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-utilities" (OuterVolumeSpecName: "utilities") pod "3dc9cd4a-90da-42cf-ab01-df00215c34a1" (UID: "3dc9cd4a-90da-42cf-ab01-df00215c34a1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.116993 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.127931 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dc9cd4a-90da-42cf-ab01-df00215c34a1-kube-api-access-zmwnq" (OuterVolumeSpecName: "kube-api-access-zmwnq") pod "3dc9cd4a-90da-42cf-ab01-df00215c34a1" (UID: "3dc9cd4a-90da-42cf-ab01-df00215c34a1"). InnerVolumeSpecName "kube-api-access-zmwnq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.200754 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3dc9cd4a-90da-42cf-ab01-df00215c34a1" (UID: "3dc9cd4a-90da-42cf-ab01-df00215c34a1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.220937 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dc9cd4a-90da-42cf-ab01-df00215c34a1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.221003 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmwnq\" (UniqueName: \"kubernetes.io/projected/3dc9cd4a-90da-42cf-ab01-df00215c34a1-kube-api-access-zmwnq\") on node \"crc\" DevicePath \"\"" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.348987 4900 generic.go:334] "Generic (PLEG): container finished" podID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerID="c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967" exitCode=0 Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.349086 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bch54" event={"ID":"3dc9cd4a-90da-42cf-ab01-df00215c34a1","Type":"ContainerDied","Data":"c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967"} Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.349146 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bch54" event={"ID":"3dc9cd4a-90da-42cf-ab01-df00215c34a1","Type":"ContainerDied","Data":"ae9df9b76c594eb4e63dfce1903d09f0425788f8851452731903aab45cbbd2cf"} Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.349176 4900 scope.go:117] "RemoveContainer" containerID="c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.349180 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bch54" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.396856 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bch54"] Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.413177 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bch54"] Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.453442 4900 scope.go:117] "RemoveContainer" containerID="81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.504315 4900 scope.go:117] "RemoveContainer" containerID="5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.568397 4900 scope.go:117] "RemoveContainer" containerID="c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967" Jan 27 13:33:11 crc kubenswrapper[4900]: E0127 13:33:11.569423 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967\": container with ID starting with c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967 not found: ID does not exist" containerID="c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.569503 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967"} err="failed to get container status \"c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967\": rpc error: code = NotFound desc = could not find container \"c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967\": container with ID starting with c5d634842578f530877e4dfb24457dc4730fc03b1da3c3b70d533984356cd967 not found: ID does not exist" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.569545 4900 scope.go:117] "RemoveContainer" containerID="81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138" Jan 27 13:33:11 crc kubenswrapper[4900]: E0127 13:33:11.570156 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138\": container with ID starting with 81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138 not found: ID does not exist" containerID="81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.570206 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138"} err="failed to get container status \"81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138\": rpc error: code = NotFound desc = could not find container \"81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138\": container with ID starting with 81d1850c34ba8cc4632b9c22839b0c666f69b1eb13860e4fd54caa0ee83de138 not found: ID does not exist" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.570244 4900 scope.go:117] "RemoveContainer" containerID="5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47" Jan 27 13:33:11 crc kubenswrapper[4900]: E0127 13:33:11.571049 4900 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47\": container with ID starting with 5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47 not found: ID does not exist" containerID="5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47" Jan 27 13:33:11 crc kubenswrapper[4900]: I0127 13:33:11.571128 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47"} err="failed to get container status \"5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47\": rpc error: code = NotFound desc = could not find container \"5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47\": container with ID starting with 5d9da8333fd37aa1f3704a80bc87b0b706e95c279c6439e30c298894e98dad47 not found: ID does not exist" Jan 27 13:33:12 crc kubenswrapper[4900]: I0127 13:33:12.500036 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" path="/var/lib/kubelet/pods/3dc9cd4a-90da-42cf-ab01-df00215c34a1/volumes" Jan 27 13:34:22 crc kubenswrapper[4900]: I0127 13:34:22.372736 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:34:22 crc kubenswrapper[4900]: I0127 13:34:22.374945 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:34:52 crc kubenswrapper[4900]: I0127 13:34:52.372431 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:34:52 crc kubenswrapper[4900]: I0127 13:34:52.372985 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:35:22 crc kubenswrapper[4900]: I0127 13:35:22.373250 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:35:22 crc kubenswrapper[4900]: I0127 13:35:22.374149 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:35:22 crc kubenswrapper[4900]: I0127 13:35:22.374222 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:35:22 crc kubenswrapper[4900]: I0127 13:35:22.375513 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5ce944e0655d1f982b63e8219b9d6bf0e4b923dfb5eb0c1a7ce20dd9d608fd8f"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:35:22 crc kubenswrapper[4900]: I0127 13:35:22.375574 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://5ce944e0655d1f982b63e8219b9d6bf0e4b923dfb5eb0c1a7ce20dd9d608fd8f" gracePeriod=600 Jan 27 13:35:22 crc kubenswrapper[4900]: I0127 13:35:22.971502 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="5ce944e0655d1f982b63e8219b9d6bf0e4b923dfb5eb0c1a7ce20dd9d608fd8f" exitCode=0 Jan 27 13:35:22 crc kubenswrapper[4900]: I0127 13:35:22.971658 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"5ce944e0655d1f982b63e8219b9d6bf0e4b923dfb5eb0c1a7ce20dd9d608fd8f"} Jan 27 13:35:22 crc kubenswrapper[4900]: I0127 13:35:22.971886 4900 scope.go:117] "RemoveContainer" containerID="2279205ef55573420f07a0620795267c0302666bc4af63a1edff7f10b11f2b11" Jan 27 13:35:23 crc kubenswrapper[4900]: I0127 13:35:23.995968 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"} Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.943960 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ck2hm"] Jan 27 13:37:36 crc kubenswrapper[4900]: E0127 13:37:36.945460 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerName="extract-content" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.945475 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerName="extract-content" Jan 27 13:37:36 crc kubenswrapper[4900]: E0127 13:37:36.945496 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerName="registry-server" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.945502 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerName="registry-server" Jan 27 13:37:36 crc kubenswrapper[4900]: E0127 13:37:36.945525 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerName="extract-utilities" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.945532 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerName="extract-utilities" Jan 27 13:37:36 crc kubenswrapper[4900]: E0127 13:37:36.945563 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerName="extract-utilities" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.945569 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerName="extract-utilities" Jan 27 13:37:36 crc kubenswrapper[4900]: E0127 13:37:36.945587 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerName="registry-server" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.945593 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerName="registry-server" Jan 27 13:37:36 crc kubenswrapper[4900]: E0127 13:37:36.945605 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerName="extract-content" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.945614 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerName="extract-content" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.945858 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dc9cd4a-90da-42cf-ab01-df00215c34a1" containerName="registry-server" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.945890 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd975c7e-d2bf-43ac-bc97-e8efa9e00611" containerName="registry-server" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.948032 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:36 crc kubenswrapper[4900]: I0127 13:37:36.971176 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ck2hm"] Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.113459 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-utilities\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.113705 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x64h\" (UniqueName: \"kubernetes.io/projected/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-kube-api-access-5x64h\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.113835 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-catalog-content\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.217439 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-utilities\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.217555 4900 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x64h\" (UniqueName: \"kubernetes.io/projected/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-kube-api-access-5x64h\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.217623 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-catalog-content\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.218404 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-catalog-content\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.218837 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-utilities\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.243567 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x64h\" (UniqueName: \"kubernetes.io/projected/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-kube-api-access-5x64h\") pod \"redhat-marketplace-ck2hm\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.288948 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:37 crc kubenswrapper[4900]: I0127 13:37:37.874881 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ck2hm"] Jan 27 13:37:38 crc kubenswrapper[4900]: I0127 13:37:38.228140 4900 generic.go:334] "Generic (PLEG): container finished" podID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerID="d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7" exitCode=0 Jan 27 13:37:38 crc kubenswrapper[4900]: I0127 13:37:38.228202 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ck2hm" event={"ID":"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01","Type":"ContainerDied","Data":"d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7"} Jan 27 13:37:38 crc kubenswrapper[4900]: I0127 13:37:38.228250 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ck2hm" event={"ID":"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01","Type":"ContainerStarted","Data":"9b09b4af529bf51450ab1a5330f9ba9adb39b2abc4695e437eb7b52ab6a91706"} Jan 27 13:37:38 crc kubenswrapper[4900]: I0127 13:37:38.231588 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 13:37:39 crc kubenswrapper[4900]: I0127 13:37:39.243724 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ck2hm" event={"ID":"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01","Type":"ContainerStarted","Data":"384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9"} Jan 27 13:37:40 crc kubenswrapper[4900]: I0127 13:37:40.259786 4900 generic.go:334] "Generic (PLEG): container finished" podID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerID="384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9" exitCode=0 Jan 27 13:37:40 crc kubenswrapper[4900]: I0127 13:37:40.259886 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ck2hm" event={"ID":"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01","Type":"ContainerDied","Data":"384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9"} Jan 27 13:37:41 crc kubenswrapper[4900]: I0127 13:37:41.278159 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ck2hm" event={"ID":"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01","Type":"ContainerStarted","Data":"6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2"} Jan 27 13:37:41 crc kubenswrapper[4900]: I0127 13:37:41.320358 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ck2hm" podStartSLOduration=2.895765237 podStartE2EDuration="5.320327195s" podCreationTimestamp="2026-01-27 13:37:36 +0000 UTC" firstStartedPulling="2026-01-27 13:37:38.231196204 +0000 UTC m=+4285.468224424" lastFinishedPulling="2026-01-27 13:37:40.655758162 +0000 UTC m=+4287.892786382" observedRunningTime="2026-01-27 13:37:41.309695859 +0000 UTC m=+4288.546724069" watchObservedRunningTime="2026-01-27 13:37:41.320327195 +0000 UTC m=+4288.557355405" Jan 27 13:37:47 crc kubenswrapper[4900]: I0127 13:37:47.289507 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:47 crc kubenswrapper[4900]: I0127 13:37:47.290351 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 
13:37:47 crc kubenswrapper[4900]: I0127 13:37:47.362588 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:47 crc kubenswrapper[4900]: I0127 13:37:47.477432 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:47 crc kubenswrapper[4900]: I0127 13:37:47.638686 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ck2hm"] Jan 27 13:37:49 crc kubenswrapper[4900]: I0127 13:37:49.377767 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ck2hm" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="registry-server" containerID="cri-o://6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2" gracePeriod=2 Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.057609 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.146584 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-utilities\") pod \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.146848 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-catalog-content\") pod \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.147897 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-utilities" (OuterVolumeSpecName: "utilities") pod "be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" (UID: "be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.149579 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5x64h\" (UniqueName: \"kubernetes.io/projected/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-kube-api-access-5x64h\") pod \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\" (UID: \"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01\") " Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.151033 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.163134 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-kube-api-access-5x64h" (OuterVolumeSpecName: "kube-api-access-5x64h") pod "be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" (UID: "be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01"). InnerVolumeSpecName "kube-api-access-5x64h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.177555 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" (UID: "be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.254295 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.254339 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5x64h\" (UniqueName: \"kubernetes.io/projected/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01-kube-api-access-5x64h\") on node \"crc\" DevicePath \"\"" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.405831 4900 generic.go:334] "Generic (PLEG): container finished" podID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerID="6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2" exitCode=0 Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.405933 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ck2hm" event={"ID":"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01","Type":"ContainerDied","Data":"6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2"} Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.405978 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ck2hm" event={"ID":"be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01","Type":"ContainerDied","Data":"9b09b4af529bf51450ab1a5330f9ba9adb39b2abc4695e437eb7b52ab6a91706"} Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.406004 4900 scope.go:117] "RemoveContainer" containerID="6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.406133 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ck2hm" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.448020 4900 scope.go:117] "RemoveContainer" containerID="384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.484257 4900 scope.go:117] "RemoveContainer" containerID="d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.506292 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ck2hm"] Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.506341 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ck2hm"] Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.568401 4900 scope.go:117] "RemoveContainer" containerID="6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2" Jan 27 13:37:50 crc kubenswrapper[4900]: E0127 13:37:50.568939 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2\": container with ID starting with 6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2 not found: ID does not exist" containerID="6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.568985 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2"} err="failed to get container status \"6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2\": rpc error: code = NotFound desc = could not find container \"6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2\": container with ID starting with 6d530b3311eecc75b1303ee0694420bde2b4bae3ce2cd94c46b66888c00fd9b2 not found: ID does not exist" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.569018 4900 scope.go:117] "RemoveContainer" containerID="384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9" Jan 27 13:37:50 crc kubenswrapper[4900]: E0127 13:37:50.569351 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9\": container with ID starting with 384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9 not found: ID does not exist" containerID="384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.569377 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9"} err="failed to get container status \"384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9\": rpc error: code = NotFound desc = could not find container \"384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9\": container with ID starting with 384b0e00bb384467e61ddce027bdca20f99de181bf89fb4f67460d9512d3e8b9 not found: ID does not exist" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.569393 4900 scope.go:117] "RemoveContainer" containerID="d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7" Jan 27 13:37:50 crc kubenswrapper[4900]: E0127 13:37:50.569574 4900 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7\": container with ID starting with d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7 not found: ID does not exist" containerID="d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7" Jan 27 13:37:50 crc kubenswrapper[4900]: I0127 13:37:50.569598 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7"} err="failed to get container status \"d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7\": rpc error: code = NotFound desc = could not find container \"d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7\": container with ID starting with d0897791f37a5304f03fffe7815ae0733b426beb08037423086c80be7b7a70b7 not found: ID does not exist" Jan 27 13:37:52 crc kubenswrapper[4900]: I0127 13:37:52.373330 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:37:52 crc kubenswrapper[4900]: I0127 13:37:52.373851 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:37:52 crc kubenswrapper[4900]: I0127 13:37:52.497725 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" path="/var/lib/kubelet/pods/be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01/volumes" Jan 27 13:38:22 crc kubenswrapper[4900]: I0127 13:38:22.372992 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:38:22 crc kubenswrapper[4900]: I0127 13:38:22.373948 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:38:52 crc kubenswrapper[4900]: I0127 13:38:52.382462 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:38:52 crc kubenswrapper[4900]: I0127 13:38:52.383047 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:38:52 crc kubenswrapper[4900]: I0127 13:38:52.383137 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:38:52 crc kubenswrapper[4900]: I0127 13:38:52.386651 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:38:52 crc kubenswrapper[4900]: I0127 13:38:52.386721 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" gracePeriod=600 Jan 27 13:38:52 crc kubenswrapper[4900]: E0127 13:38:52.514921 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:38:53 crc kubenswrapper[4900]: I0127 13:38:53.265010 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" exitCode=0 Jan 27 13:38:53 crc kubenswrapper[4900]: I0127 13:38:53.265122 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"} Jan 27 13:38:53 crc kubenswrapper[4900]: I0127 13:38:53.265474 4900 scope.go:117] "RemoveContainer" containerID="5ce944e0655d1f982b63e8219b9d6bf0e4b923dfb5eb0c1a7ce20dd9d608fd8f" Jan 27 13:38:53 crc kubenswrapper[4900]: I0127 13:38:53.266695 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:38:53 crc kubenswrapper[4900]: E0127 13:38:53.267310 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:39:03 crc kubenswrapper[4900]: I0127 13:39:03.483035 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:39:03 crc kubenswrapper[4900]: E0127 13:39:03.484359 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:39:15 crc 
Jan 27 13:39:15 crc kubenswrapper[4900]: I0127 13:39:15.483782 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:39:15 crc kubenswrapper[4900]: E0127 13:39:15.485075 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:39:26 crc kubenswrapper[4900]: I0127 13:39:26.495553 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:39:26 crc kubenswrapper[4900]: E0127 13:39:26.496830 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:39:40 crc kubenswrapper[4900]: I0127 13:39:40.483525 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:39:40 crc kubenswrapper[4900]: E0127 13:39:40.484858 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:39:55 crc kubenswrapper[4900]: I0127 13:39:55.483783 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:39:55 crc kubenswrapper[4900]: E0127 13:39:55.485309 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:40:08 crc kubenswrapper[4900]: I0127 13:40:08.484846 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:40:08 crc kubenswrapper[4900]: E0127 13:40:08.486012 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:40:21 crc kubenswrapper[4900]: I0127 13:40:21.483582 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:40:21 crc kubenswrapper[4900]: E0127 13:40:21.484740 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:40:34 crc kubenswrapper[4900]: I0127 13:40:34.482888 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:40:34 crc kubenswrapper[4900]: E0127 13:40:34.484242 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:40:45 crc kubenswrapper[4900]: I0127 13:40:45.486294 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:40:45 crc kubenswrapper[4900]: E0127 13:40:45.487914 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:40:58 crc kubenswrapper[4900]: I0127 13:40:58.482792 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:40:58 crc kubenswrapper[4900]: E0127 13:40:58.483836 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.075816 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hzjdt"]
Jan 27 13:41:02 crc kubenswrapper[4900]: E0127 13:41:02.079950 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="registry-server"
Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.080117 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="registry-server"
Jan 27 13:41:02 crc kubenswrapper[4900]: E0127 13:41:02.080244 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="extract-utilities"
Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.080328 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="extract-utilities"
Jan 27 13:41:02 crc kubenswrapper[4900]: E0127 13:41:02.080452 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="extract-content"
"RemoveStaleState: removing container" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="extract-content" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.080540 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="extract-content" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.080992 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4ecefc-eae9-48b8-b0fb-7e3a0e4aee01" containerName="registry-server" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.085371 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.159758 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hzjdt"] Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.185574 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxjzv\" (UniqueName: \"kubernetes.io/projected/a0d6aa78-4ffd-417c-afbb-980932c162a7-kube-api-access-cxjzv\") pod \"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.190486 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-catalog-content\") pod \"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.190602 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-utilities\") pod \"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.294451 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxjzv\" (UniqueName: \"kubernetes.io/projected/a0d6aa78-4ffd-417c-afbb-980932c162a7-kube-api-access-cxjzv\") pod \"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.294737 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-catalog-content\") pod \"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.294766 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-utilities\") pod \"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.297861 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-catalog-content\") pod 
\"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.299151 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-utilities\") pod \"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.319929 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxjzv\" (UniqueName: \"kubernetes.io/projected/a0d6aa78-4ffd-417c-afbb-980932c162a7-kube-api-access-cxjzv\") pod \"redhat-operators-hzjdt\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") " pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:02 crc kubenswrapper[4900]: I0127 13:41:02.433741 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:03 crc kubenswrapper[4900]: I0127 13:41:03.045366 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hzjdt"] Jan 27 13:41:03 crc kubenswrapper[4900]: I0127 13:41:03.791685 4900 generic.go:334] "Generic (PLEG): container finished" podID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerID="49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800" exitCode=0 Jan 27 13:41:03 crc kubenswrapper[4900]: I0127 13:41:03.791800 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzjdt" event={"ID":"a0d6aa78-4ffd-417c-afbb-980932c162a7","Type":"ContainerDied","Data":"49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800"} Jan 27 13:41:03 crc kubenswrapper[4900]: I0127 13:41:03.792455 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzjdt" event={"ID":"a0d6aa78-4ffd-417c-afbb-980932c162a7","Type":"ContainerStarted","Data":"6def0cf1a7ea2299ba247b5c8ae525d4ea29bd163c9f3e03cf5d7d25efb326ca"} Jan 27 13:41:05 crc kubenswrapper[4900]: I0127 13:41:05.832108 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzjdt" event={"ID":"a0d6aa78-4ffd-417c-afbb-980932c162a7","Type":"ContainerStarted","Data":"0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82"} Jan 27 13:41:12 crc kubenswrapper[4900]: I0127 13:41:12.805265 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:41:12 crc kubenswrapper[4900]: E0127 13:41:12.819107 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:41:16 crc kubenswrapper[4900]: I0127 13:41:16.261432 4900 generic.go:334] "Generic (PLEG): container finished" podID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerID="0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82" exitCode=0 Jan 27 13:41:16 crc kubenswrapper[4900]: I0127 13:41:16.261533 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-hzjdt" event={"ID":"a0d6aa78-4ffd-417c-afbb-980932c162a7","Type":"ContainerDied","Data":"0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82"} Jan 27 13:41:18 crc kubenswrapper[4900]: I0127 13:41:18.598311 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzjdt" event={"ID":"a0d6aa78-4ffd-417c-afbb-980932c162a7","Type":"ContainerStarted","Data":"0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62"} Jan 27 13:41:18 crc kubenswrapper[4900]: I0127 13:41:18.638837 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hzjdt" podStartSLOduration=3.088200183 podStartE2EDuration="16.638804539s" podCreationTimestamp="2026-01-27 13:41:02 +0000 UTC" firstStartedPulling="2026-01-27 13:41:03.798184012 +0000 UTC m=+4491.035212222" lastFinishedPulling="2026-01-27 13:41:17.348788368 +0000 UTC m=+4504.585816578" observedRunningTime="2026-01-27 13:41:18.623404866 +0000 UTC m=+4505.860433076" watchObservedRunningTime="2026-01-27 13:41:18.638804539 +0000 UTC m=+4505.875832749" Jan 27 13:41:22 crc kubenswrapper[4900]: I0127 13:41:22.435742 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:22 crc kubenswrapper[4900]: I0127 13:41:22.436627 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:23 crc kubenswrapper[4900]: I0127 13:41:23.489454 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hzjdt" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="registry-server" probeResult="failure" output=< Jan 27 13:41:23 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:41:23 crc kubenswrapper[4900]: > Jan 27 13:41:26 crc kubenswrapper[4900]: I0127 13:41:26.493606 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:41:26 crc kubenswrapper[4900]: E0127 13:41:26.494850 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:41:33 crc kubenswrapper[4900]: I0127 13:41:33.522966 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hzjdt" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="registry-server" probeResult="failure" output=< Jan 27 13:41:33 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:41:33 crc kubenswrapper[4900]: > Jan 27 13:41:40 crc kubenswrapper[4900]: I0127 13:41:40.482558 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:41:40 crc kubenswrapper[4900]: E0127 13:41:40.483717 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 27 13:41:40 crc kubenswrapper[4900]: I0127 13:41:40.482558 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:41:40 crc kubenswrapper[4900]: E0127 13:41:40.483717 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:41:41 crc kubenswrapper[4900]: E0127 13:41:41.482612 4900 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.220:36288->38.102.83.220:38021: write tcp 38.102.83.220:36288->38.102.83.220:38021: write: broken pipe
Jan 27 13:41:42 crc kubenswrapper[4900]: I0127 13:41:42.505763 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hzjdt"
Jan 27 13:41:42 crc kubenswrapper[4900]: I0127 13:41:42.574788 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hzjdt"
Jan 27 13:41:42 crc kubenswrapper[4900]: I0127 13:41:42.779878 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hzjdt"]
Jan 27 13:41:44 crc kubenswrapper[4900]: I0127 13:41:44.113885 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hzjdt" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="registry-server" containerID="cri-o://0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62" gracePeriod=2
Jan 27 13:41:44 crc kubenswrapper[4900]: I0127 13:41:44.787823 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hzjdt"
Jan 27 13:41:44 crc kubenswrapper[4900]: I0127 13:41:44.927160 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxjzv\" (UniqueName: \"kubernetes.io/projected/a0d6aa78-4ffd-417c-afbb-980932c162a7-kube-api-access-cxjzv\") pod \"a0d6aa78-4ffd-417c-afbb-980932c162a7\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") "
Jan 27 13:41:44 crc kubenswrapper[4900]: I0127 13:41:44.927544 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-catalog-content\") pod \"a0d6aa78-4ffd-417c-afbb-980932c162a7\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") "
Jan 27 13:41:44 crc kubenswrapper[4900]: I0127 13:41:44.927824 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-utilities\") pod \"a0d6aa78-4ffd-417c-afbb-980932c162a7\" (UID: \"a0d6aa78-4ffd-417c-afbb-980932c162a7\") "
Jan 27 13:41:44 crc kubenswrapper[4900]: I0127 13:41:44.929044 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-utilities" (OuterVolumeSpecName: "utilities") pod "a0d6aa78-4ffd-417c-afbb-980932c162a7" (UID: "a0d6aa78-4ffd-417c-afbb-980932c162a7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.031374 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.088674 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a0d6aa78-4ffd-417c-afbb-980932c162a7" (UID: "a0d6aa78-4ffd-417c-afbb-980932c162a7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.128133 4900 generic.go:334] "Generic (PLEG): container finished" podID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerID="0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62" exitCode=0 Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.128186 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzjdt" event={"ID":"a0d6aa78-4ffd-417c-afbb-980932c162a7","Type":"ContainerDied","Data":"0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62"} Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.128225 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hzjdt" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.128245 4900 scope.go:117] "RemoveContainer" containerID="0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.128232 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzjdt" event={"ID":"a0d6aa78-4ffd-417c-afbb-980932c162a7","Type":"ContainerDied","Data":"6def0cf1a7ea2299ba247b5c8ae525d4ea29bd163c9f3e03cf5d7d25efb326ca"} Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.133949 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0d6aa78-4ffd-417c-afbb-980932c162a7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.173124 4900 scope.go:117] "RemoveContainer" containerID="0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.230786 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0d6aa78-4ffd-417c-afbb-980932c162a7-kube-api-access-cxjzv" (OuterVolumeSpecName: "kube-api-access-cxjzv") pod "a0d6aa78-4ffd-417c-afbb-980932c162a7" (UID: "a0d6aa78-4ffd-417c-afbb-980932c162a7"). InnerVolumeSpecName "kube-api-access-cxjzv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.237585 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxjzv\" (UniqueName: \"kubernetes.io/projected/a0d6aa78-4ffd-417c-afbb-980932c162a7-kube-api-access-cxjzv\") on node \"crc\" DevicePath \"\"" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.283884 4900 scope.go:117] "RemoveContainer" containerID="49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.731171 4900 scope.go:117] "RemoveContainer" containerID="0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62" Jan 27 13:41:45 crc kubenswrapper[4900]: E0127 13:41:45.732275 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62\": container with ID starting with 0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62 not found: ID does not exist" containerID="0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.732462 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62"} err="failed to get container status \"0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62\": rpc error: code = NotFound desc = could not find container \"0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62\": container with ID starting with 0f93d590abfdeee6c0f7dcd2610a9b90d177dbedeed709859030a7ddb592bb62 not found: ID does not exist" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.732636 4900 scope.go:117] "RemoveContainer" containerID="0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82" Jan 27 13:41:45 crc kubenswrapper[4900]: E0127 13:41:45.734892 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82\": container with ID starting with 0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82 not found: ID does not exist" containerID="0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.734985 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82"} err="failed to get container status \"0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82\": rpc error: code = NotFound desc = could not find container \"0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82\": container with ID starting with 0d039fa684eff3d06cee9de4b38129efd4b12d1ffd6d340cc65910c0914afa82 not found: ID does not exist" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.735049 4900 scope.go:117] "RemoveContainer" containerID="49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800" Jan 27 13:41:45 crc kubenswrapper[4900]: E0127 13:41:45.735934 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800\": container with ID starting with 49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800 not found: ID does not 
exist" containerID="49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.735982 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800"} err="failed to get container status \"49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800\": rpc error: code = NotFound desc = could not find container \"49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800\": container with ID starting with 49b40ebdbfd6d007140eeb1a9052c04b6a33c5ab55f4df4d20a86d956d05c800 not found: ID does not exist" Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.775109 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hzjdt"] Jan 27 13:41:45 crc kubenswrapper[4900]: I0127 13:41:45.784740 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hzjdt"] Jan 27 13:41:46 crc kubenswrapper[4900]: I0127 13:41:46.498905 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" path="/var/lib/kubelet/pods/a0d6aa78-4ffd-417c-afbb-980932c162a7/volumes" Jan 27 13:41:51 crc kubenswrapper[4900]: I0127 13:41:51.484355 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:41:51 crc kubenswrapper[4900]: E0127 13:41:51.487053 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:42:03 crc kubenswrapper[4900]: I0127 13:42:03.484686 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:42:03 crc kubenswrapper[4900]: E0127 13:42:03.485801 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:42:16 crc kubenswrapper[4900]: I0127 13:42:16.494447 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:42:16 crc kubenswrapper[4900]: E0127 13:42:16.495885 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:42:28 crc kubenswrapper[4900]: I0127 13:42:28.482753 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:42:28 crc kubenswrapper[4900]: E0127 13:42:28.483894 4900 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.164112 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j8tpl"] Jan 27 13:42:32 crc kubenswrapper[4900]: E0127 13:42:32.165372 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="extract-utilities" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.165390 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="extract-utilities" Jan 27 13:42:32 crc kubenswrapper[4900]: E0127 13:42:32.165405 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="registry-server" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.165415 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="registry-server" Jan 27 13:42:32 crc kubenswrapper[4900]: E0127 13:42:32.165429 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="extract-content" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.165435 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="extract-content" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.165738 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0d6aa78-4ffd-417c-afbb-980932c162a7" containerName="registry-server" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.167950 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.190702 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j8tpl"] Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.286255 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-utilities\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.286328 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgl22\" (UniqueName: \"kubernetes.io/projected/7c19277f-1246-4b29-9002-703865e15c95-kube-api-access-jgl22\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.286501 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-catalog-content\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.390805 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-catalog-content\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.391022 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-utilities\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.391068 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgl22\" (UniqueName: \"kubernetes.io/projected/7c19277f-1246-4b29-9002-703865e15c95-kube-api-access-jgl22\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.391607 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-utilities\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.391607 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-catalog-content\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.417016 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jgl22\" (UniqueName: \"kubernetes.io/projected/7c19277f-1246-4b29-9002-703865e15c95-kube-api-access-jgl22\") pod \"certified-operators-j8tpl\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:32 crc kubenswrapper[4900]: I0127 13:42:32.560787 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:33 crc kubenswrapper[4900]: I0127 13:42:33.186763 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j8tpl"] Jan 27 13:42:34 crc kubenswrapper[4900]: I0127 13:42:34.079333 4900 generic.go:334] "Generic (PLEG): container finished" podID="7c19277f-1246-4b29-9002-703865e15c95" containerID="56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f" exitCode=0 Jan 27 13:42:34 crc kubenswrapper[4900]: I0127 13:42:34.079464 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8tpl" event={"ID":"7c19277f-1246-4b29-9002-703865e15c95","Type":"ContainerDied","Data":"56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f"} Jan 27 13:42:34 crc kubenswrapper[4900]: I0127 13:42:34.079868 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8tpl" event={"ID":"7c19277f-1246-4b29-9002-703865e15c95","Type":"ContainerStarted","Data":"75f4611bc447ec4ff87e34713ecfcf6416fec5955c779ad9e8c0ff41bccc2281"} Jan 27 13:42:36 crc kubenswrapper[4900]: I0127 13:42:36.113144 4900 generic.go:334] "Generic (PLEG): container finished" podID="7c19277f-1246-4b29-9002-703865e15c95" containerID="11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9" exitCode=0 Jan 27 13:42:36 crc kubenswrapper[4900]: I0127 13:42:36.113252 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8tpl" event={"ID":"7c19277f-1246-4b29-9002-703865e15c95","Type":"ContainerDied","Data":"11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9"} Jan 27 13:42:37 crc kubenswrapper[4900]: I0127 13:42:37.134897 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8tpl" event={"ID":"7c19277f-1246-4b29-9002-703865e15c95","Type":"ContainerStarted","Data":"be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d"} Jan 27 13:42:37 crc kubenswrapper[4900]: I0127 13:42:37.166609 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j8tpl" podStartSLOduration=2.727382541 podStartE2EDuration="5.166577929s" podCreationTimestamp="2026-01-27 13:42:32 +0000 UTC" firstStartedPulling="2026-01-27 13:42:34.084688797 +0000 UTC m=+4581.321717007" lastFinishedPulling="2026-01-27 13:42:36.523884195 +0000 UTC m=+4583.760912395" observedRunningTime="2026-01-27 13:42:37.165315183 +0000 UTC m=+4584.402343403" watchObservedRunningTime="2026-01-27 13:42:37.166577929 +0000 UTC m=+4584.403606139" Jan 27 13:42:41 crc kubenswrapper[4900]: I0127 13:42:41.483111 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:42:41 crc kubenswrapper[4900]: E0127 13:42:41.484591 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Jan 27 13:42:41 crc kubenswrapper[4900]: I0127 13:42:41.483111 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3"
Jan 27 13:42:41 crc kubenswrapper[4900]: E0127 13:42:41.484591 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:42:42 crc kubenswrapper[4900]: I0127 13:42:42.561755 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j8tpl"
Jan 27 13:42:42 crc kubenswrapper[4900]: I0127 13:42:42.561838 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j8tpl"
Jan 27 13:42:42 crc kubenswrapper[4900]: I0127 13:42:42.643994 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j8tpl"
Jan 27 13:42:43 crc kubenswrapper[4900]: I0127 13:42:43.755804 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j8tpl"
Jan 27 13:42:43 crc kubenswrapper[4900]: I0127 13:42:43.821412 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j8tpl"]
Jan 27 13:42:45 crc kubenswrapper[4900]: I0127 13:42:45.245910 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j8tpl" podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="registry-server" containerID="cri-o://be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d" gracePeriod=2
Jan 27 13:42:45 crc kubenswrapper[4900]: E0127 13:42:45.551623 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c19277f_1246_4b29_9002_703865e15c95.slice/crio-be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c19277f_1246_4b29_9002_703865e15c95.slice/crio-conmon-be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d.scope\": RecentStats: unable to find data in memory cache]"
Jan 27 13:42:45 crc kubenswrapper[4900]: I0127 13:42:45.873737 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j8tpl"
Need to start a new one" pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.010675 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-catalog-content\") pod \"7c19277f-1246-4b29-9002-703865e15c95\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.010940 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgl22\" (UniqueName: \"kubernetes.io/projected/7c19277f-1246-4b29-9002-703865e15c95-kube-api-access-jgl22\") pod \"7c19277f-1246-4b29-9002-703865e15c95\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.011096 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-utilities\") pod \"7c19277f-1246-4b29-9002-703865e15c95\" (UID: \"7c19277f-1246-4b29-9002-703865e15c95\") " Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.013350 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-utilities" (OuterVolumeSpecName: "utilities") pod "7c19277f-1246-4b29-9002-703865e15c95" (UID: "7c19277f-1246-4b29-9002-703865e15c95"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.021220 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c19277f-1246-4b29-9002-703865e15c95-kube-api-access-jgl22" (OuterVolumeSpecName: "kube-api-access-jgl22") pod "7c19277f-1246-4b29-9002-703865e15c95" (UID: "7c19277f-1246-4b29-9002-703865e15c95"). InnerVolumeSpecName "kube-api-access-jgl22". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.082405 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7c19277f-1246-4b29-9002-703865e15c95" (UID: "7c19277f-1246-4b29-9002-703865e15c95"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.115427 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.115470 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgl22\" (UniqueName: \"kubernetes.io/projected/7c19277f-1246-4b29-9002-703865e15c95-kube-api-access-jgl22\") on node \"crc\" DevicePath \"\"" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.115484 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c19277f-1246-4b29-9002-703865e15c95-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.269074 4900 generic.go:334] "Generic (PLEG): container finished" podID="7c19277f-1246-4b29-9002-703865e15c95" containerID="be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d" exitCode=0 Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.270297 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j8tpl" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.270341 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8tpl" event={"ID":"7c19277f-1246-4b29-9002-703865e15c95","Type":"ContainerDied","Data":"be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d"} Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.271175 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8tpl" event={"ID":"7c19277f-1246-4b29-9002-703865e15c95","Type":"ContainerDied","Data":"75f4611bc447ec4ff87e34713ecfcf6416fec5955c779ad9e8c0ff41bccc2281"} Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.275563 4900 scope.go:117] "RemoveContainer" containerID="be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.326349 4900 scope.go:117] "RemoveContainer" containerID="11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.326631 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j8tpl"] Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.341963 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j8tpl"] Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.366321 4900 scope.go:117] "RemoveContainer" containerID="56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.436262 4900 scope.go:117] "RemoveContainer" containerID="be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d" Jan 27 13:42:46 crc kubenswrapper[4900]: E0127 13:42:46.436985 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d\": container with ID starting with be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d not found: ID does not exist" containerID="be1181490873c54155de55dc121c56e5482d29308b96b60368c5b48f4450943d" Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.437067 
Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.437102 4900 scope.go:117] "RemoveContainer" containerID="11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9"
Jan 27 13:42:46 crc kubenswrapper[4900]: E0127 13:42:46.437446 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9\": container with ID starting with 11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9 not found: ID does not exist" containerID="11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9"
Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.437465 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9"} err="failed to get container status \"11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9\": rpc error: code = NotFound desc = could not find container \"11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9\": container with ID starting with 11266a4990110f3c0fa94791cd692d0b63a5203b125104a8773c80cd3f07ebc9 not found: ID does not exist"
Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.437479 4900 scope.go:117] "RemoveContainer" containerID="56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f"
Jan 27 13:42:46 crc kubenswrapper[4900]: E0127 13:42:46.437716 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f\": container with ID starting with 56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f not found: ID does not exist" containerID="56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f"
Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.437741 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f"} err="failed to get container status \"56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f\": rpc error: code = NotFound desc = could not find container \"56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f\": container with ID starting with 56f6886ce81dee88f5ed344ab6b89159646cc4def12ea9aacd9628bebd0e621f not found: ID does not exist"
Jan 27 13:42:46 crc kubenswrapper[4900]: I0127 13:42:46.501437 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c19277f-1246-4b29-9002-703865e15c95" path="/var/lib/kubelet/pods/7c19277f-1246-4b29-9002-703865e15c95/volumes"
Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.325938 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mbc4s"]
Jan 27 13:42:54 crc kubenswrapper[4900]: E0127 13:42:54.327490 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="registry-server"
podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="registry-server" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.327508 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="registry-server" Jan 27 13:42:54 crc kubenswrapper[4900]: E0127 13:42:54.327530 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="extract-content" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.327536 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="extract-content" Jan 27 13:42:54 crc kubenswrapper[4900]: E0127 13:42:54.327575 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="extract-utilities" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.327582 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="extract-utilities" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.327910 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c19277f-1246-4b29-9002-703865e15c95" containerName="registry-server" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.334614 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.391889 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mbc4s"] Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.460436 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-catalog-content\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.460531 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-utilities\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.460696 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvj5m\" (UniqueName: \"kubernetes.io/projected/58386dae-26a2-4ff2-9267-41b4f9506830-kube-api-access-vvj5m\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.482552 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:42:54 crc kubenswrapper[4900]: E0127 13:42:54.483019 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" 
podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.564602 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-catalog-content\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.564742 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-utilities\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.564981 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvj5m\" (UniqueName: \"kubernetes.io/projected/58386dae-26a2-4ff2-9267-41b4f9506830-kube-api-access-vvj5m\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.565939 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-catalog-content\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.568001 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-utilities\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.593762 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvj5m\" (UniqueName: \"kubernetes.io/projected/58386dae-26a2-4ff2-9267-41b4f9506830-kube-api-access-vvj5m\") pod \"community-operators-mbc4s\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:54 crc kubenswrapper[4900]: I0127 13:42:54.713194 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:42:55 crc kubenswrapper[4900]: I0127 13:42:55.375040 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mbc4s"] Jan 27 13:42:55 crc kubenswrapper[4900]: I0127 13:42:55.445393 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mbc4s" event={"ID":"58386dae-26a2-4ff2-9267-41b4f9506830","Type":"ContainerStarted","Data":"111734d5dd5b3bf9bba7030e53da564e4105867ee32e3ecf2bc8d40b909ea47e"} Jan 27 13:42:56 crc kubenswrapper[4900]: I0127 13:42:56.469428 4900 generic.go:334] "Generic (PLEG): container finished" podID="58386dae-26a2-4ff2-9267-41b4f9506830" containerID="285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e" exitCode=0 Jan 27 13:42:56 crc kubenswrapper[4900]: I0127 13:42:56.469661 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mbc4s" event={"ID":"58386dae-26a2-4ff2-9267-41b4f9506830","Type":"ContainerDied","Data":"285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e"} Jan 27 13:42:56 crc kubenswrapper[4900]: I0127 13:42:56.472802 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 13:42:58 crc kubenswrapper[4900]: I0127 13:42:58.503257 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mbc4s" event={"ID":"58386dae-26a2-4ff2-9267-41b4f9506830","Type":"ContainerStarted","Data":"1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe"} Jan 27 13:43:01 crc kubenswrapper[4900]: I0127 13:43:01.586199 4900 generic.go:334] "Generic (PLEG): container finished" podID="58386dae-26a2-4ff2-9267-41b4f9506830" containerID="1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe" exitCode=0 Jan 27 13:43:01 crc kubenswrapper[4900]: I0127 13:43:01.586497 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mbc4s" event={"ID":"58386dae-26a2-4ff2-9267-41b4f9506830","Type":"ContainerDied","Data":"1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe"} Jan 27 13:43:02 crc kubenswrapper[4900]: I0127 13:43:02.606972 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mbc4s" event={"ID":"58386dae-26a2-4ff2-9267-41b4f9506830","Type":"ContainerStarted","Data":"9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d"} Jan 27 13:43:02 crc kubenswrapper[4900]: I0127 13:43:02.668520 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mbc4s" podStartSLOduration=3.0976385889999998 podStartE2EDuration="8.668483292s" podCreationTimestamp="2026-01-27 13:42:54 +0000 UTC" firstStartedPulling="2026-01-27 13:42:56.472490529 +0000 UTC m=+4603.709518739" lastFinishedPulling="2026-01-27 13:43:02.043335232 +0000 UTC m=+4609.280363442" observedRunningTime="2026-01-27 13:43:02.639813857 +0000 UTC m=+4609.876842067" watchObservedRunningTime="2026-01-27 13:43:02.668483292 +0000 UTC m=+4609.905511502" Jan 27 13:43:04 crc kubenswrapper[4900]: I0127 13:43:04.716427 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:43:04 crc kubenswrapper[4900]: I0127 13:43:04.718907 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:43:05 crc kubenswrapper[4900]: I0127 13:43:05.865275 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-mbc4s" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="registry-server" probeResult="failure" output=< Jan 27 13:43:05 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:43:05 crc kubenswrapper[4900]: > Jan 27 13:43:09 crc kubenswrapper[4900]: I0127 13:43:09.483127 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:43:09 crc kubenswrapper[4900]: E0127 13:43:09.484322 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:43:14 crc kubenswrapper[4900]: I0127 13:43:14.784439 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:43:14 crc kubenswrapper[4900]: I0127 13:43:14.859429 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:43:15 crc kubenswrapper[4900]: I0127 13:43:15.039984 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mbc4s"] Jan 27 13:43:16 crc kubenswrapper[4900]: I0127 13:43:16.950915 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mbc4s" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="registry-server" containerID="cri-o://9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d" gracePeriod=2 Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.536228 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.663753 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-catalog-content\") pod \"58386dae-26a2-4ff2-9267-41b4f9506830\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.663965 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-utilities\") pod \"58386dae-26a2-4ff2-9267-41b4f9506830\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.664245 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvj5m\" (UniqueName: \"kubernetes.io/projected/58386dae-26a2-4ff2-9267-41b4f9506830-kube-api-access-vvj5m\") pod \"58386dae-26a2-4ff2-9267-41b4f9506830\" (UID: \"58386dae-26a2-4ff2-9267-41b4f9506830\") " Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.665126 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-utilities" (OuterVolumeSpecName: "utilities") pod "58386dae-26a2-4ff2-9267-41b4f9506830" (UID: "58386dae-26a2-4ff2-9267-41b4f9506830"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.667476 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.690873 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58386dae-26a2-4ff2-9267-41b4f9506830-kube-api-access-vvj5m" (OuterVolumeSpecName: "kube-api-access-vvj5m") pod "58386dae-26a2-4ff2-9267-41b4f9506830" (UID: "58386dae-26a2-4ff2-9267-41b4f9506830"). InnerVolumeSpecName "kube-api-access-vvj5m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.727273 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58386dae-26a2-4ff2-9267-41b4f9506830" (UID: "58386dae-26a2-4ff2-9267-41b4f9506830"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.771855 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58386dae-26a2-4ff2-9267-41b4f9506830-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.771913 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvj5m\" (UniqueName: \"kubernetes.io/projected/58386dae-26a2-4ff2-9267-41b4f9506830-kube-api-access-vvj5m\") on node \"crc\" DevicePath \"\"" Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.974829 4900 generic.go:334] "Generic (PLEG): container finished" podID="58386dae-26a2-4ff2-9267-41b4f9506830" containerID="9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d" exitCode=0 Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.974870 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mbc4s" event={"ID":"58386dae-26a2-4ff2-9267-41b4f9506830","Type":"ContainerDied","Data":"9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d"} Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.974941 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mbc4s" event={"ID":"58386dae-26a2-4ff2-9267-41b4f9506830","Type":"ContainerDied","Data":"111734d5dd5b3bf9bba7030e53da564e4105867ee32e3ecf2bc8d40b909ea47e"} Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.974967 4900 scope.go:117] "RemoveContainer" containerID="9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d" Jan 27 13:43:17 crc kubenswrapper[4900]: I0127 13:43:17.975002 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mbc4s" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.022604 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mbc4s"] Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.041975 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mbc4s"] Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.044554 4900 scope.go:117] "RemoveContainer" containerID="1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.082260 4900 scope.go:117] "RemoveContainer" containerID="285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.153442 4900 scope.go:117] "RemoveContainer" containerID="9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d" Jan 27 13:43:18 crc kubenswrapper[4900]: E0127 13:43:18.154245 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d\": container with ID starting with 9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d not found: ID does not exist" containerID="9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.154283 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d"} err="failed to get container status \"9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d\": rpc error: code = NotFound desc = could not find container \"9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d\": container with ID starting with 9bb240b0e9ab789b4920af77fff0430af86f8a4d21ee9417dc9d6ddecef8111d not found: ID does not exist" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.154312 4900 scope.go:117] "RemoveContainer" containerID="1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe" Jan 27 13:43:18 crc kubenswrapper[4900]: E0127 13:43:18.157083 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe\": container with ID starting with 1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe not found: ID does not exist" containerID="1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.157145 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe"} err="failed to get container status \"1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe\": rpc error: code = NotFound desc = could not find container \"1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe\": container with ID starting with 1e9d1144c76eacbef095074494bcdda728ec088fb8c289d31285d7b2220796fe not found: ID does not exist" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.157164 4900 scope.go:117] "RemoveContainer" containerID="285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e" Jan 27 13:43:18 crc kubenswrapper[4900]: E0127 13:43:18.157850 4900 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e\": container with ID starting with 285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e not found: ID does not exist" containerID="285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.157875 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e"} err="failed to get container status \"285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e\": rpc error: code = NotFound desc = could not find container \"285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e\": container with ID starting with 285eeb7338e3fb2f0a46a27933efa257efdcffeab0e642d465f6fe03242d0c4e not found: ID does not exist" Jan 27 13:43:18 crc kubenswrapper[4900]: I0127 13:43:18.501085 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" path="/var/lib/kubelet/pods/58386dae-26a2-4ff2-9267-41b4f9506830/volumes" Jan 27 13:43:24 crc kubenswrapper[4900]: I0127 13:43:24.483086 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:43:24 crc kubenswrapper[4900]: E0127 13:43:24.485301 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:43:35 crc kubenswrapper[4900]: I0127 13:43:35.483293 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:43:35 crc kubenswrapper[4900]: E0127 13:43:35.484289 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:43:42 crc kubenswrapper[4900]: I0127 13:43:42.816920 4900 trace.go:236] Trace[1771461605]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-server-2" (27-Jan-2026 13:43:38.942) (total time: 3872ms): Jan 27 13:43:42 crc kubenswrapper[4900]: Trace[1771461605]: [3.8720404s] [3.8720404s] END Jan 27 13:43:42 crc kubenswrapper[4900]: I0127 13:43:42.840791 4900 trace.go:236] Trace[455595920]: "Calculate volume metrics of wal for pod openshift-logging/logging-loki-ingester-0" (27-Jan-2026 13:43:40.838) (total time: 2002ms): Jan 27 13:43:42 crc kubenswrapper[4900]: Trace[455595920]: [2.002426191s] [2.002426191s] END Jan 27 13:43:42 crc kubenswrapper[4900]: I0127 13:43:42.854127 4900 trace.go:236] Trace[342877662]: "Calculate volume metrics of prometheus-metric-storage-db for pod openstack/prometheus-metric-storage-0" (27-Jan-2026 13:43:40.871) (total time: 1982ms): Jan 27 13:43:42 crc kubenswrapper[4900]: Trace[342877662]: [1.982277981s] [1.982277981s] END Jan 27 13:43:44 crc 
kubenswrapper[4900]: E0127 13:43:44.972427 4900 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.220:50770->38.102.83.220:38021: write tcp 38.102.83.220:50770->38.102.83.220:38021: write: connection reset by peer Jan 27 13:43:46 crc kubenswrapper[4900]: I0127 13:43:46.497041 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:43:46 crc kubenswrapper[4900]: E0127 13:43:46.497690 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:43:57 crc kubenswrapper[4900]: I0127 13:43:57.482675 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:43:59 crc kubenswrapper[4900]: I0127 13:43:59.554781 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"e61aa6c67f9230704f1a0085bb75a6c32087d793e1a8a6664a8e90dda39d1378"} Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.174311 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96"] Jan 27 13:45:00 crc kubenswrapper[4900]: E0127 13:45:00.176019 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="registry-server" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.176046 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="registry-server" Jan 27 13:45:00 crc kubenswrapper[4900]: E0127 13:45:00.176076 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="extract-utilities" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.176087 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="extract-utilities" Jan 27 13:45:00 crc kubenswrapper[4900]: E0127 13:45:00.176134 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="extract-content" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.176144 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="extract-content" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.176535 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="58386dae-26a2-4ff2-9267-41b4f9506830" containerName="registry-server" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.178033 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.182475 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.183663 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.198402 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96"] Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.211868 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a213cc66-7a29-4ebc-99ec-5f609e067565-secret-volume\") pod \"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.212333 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlj8j\" (UniqueName: \"kubernetes.io/projected/a213cc66-7a29-4ebc-99ec-5f609e067565-kube-api-access-rlj8j\") pod \"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.212493 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a213cc66-7a29-4ebc-99ec-5f609e067565-config-volume\") pod \"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.315294 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a213cc66-7a29-4ebc-99ec-5f609e067565-secret-volume\") pod \"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.315444 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlj8j\" (UniqueName: \"kubernetes.io/projected/a213cc66-7a29-4ebc-99ec-5f609e067565-kube-api-access-rlj8j\") pod \"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.315506 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a213cc66-7a29-4ebc-99ec-5f609e067565-config-volume\") pod \"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.316690 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a213cc66-7a29-4ebc-99ec-5f609e067565-config-volume\") pod 
\"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.323824 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a213cc66-7a29-4ebc-99ec-5f609e067565-secret-volume\") pod \"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.338083 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlj8j\" (UniqueName: \"kubernetes.io/projected/a213cc66-7a29-4ebc-99ec-5f609e067565-kube-api-access-rlj8j\") pod \"collect-profiles-29492025-gll96\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:00 crc kubenswrapper[4900]: I0127 13:45:00.508474 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:01 crc kubenswrapper[4900]: I0127 13:45:01.089374 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96"] Jan 27 13:45:01 crc kubenswrapper[4900]: I0127 13:45:01.448811 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" event={"ID":"a213cc66-7a29-4ebc-99ec-5f609e067565","Type":"ContainerStarted","Data":"a3c94e55c1742872a83021e458408f3efec687f172c05b75643f5fb839469e3a"} Jan 27 13:45:01 crc kubenswrapper[4900]: I0127 13:45:01.449304 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" event={"ID":"a213cc66-7a29-4ebc-99ec-5f609e067565","Type":"ContainerStarted","Data":"c6022b53398512841cac407dcbf26235de9fdc0925cbe0c6e1f15fd23efcd74c"} Jan 27 13:45:02 crc kubenswrapper[4900]: I0127 13:45:02.468313 4900 generic.go:334] "Generic (PLEG): container finished" podID="a213cc66-7a29-4ebc-99ec-5f609e067565" containerID="a3c94e55c1742872a83021e458408f3efec687f172c05b75643f5fb839469e3a" exitCode=0 Jan 27 13:45:02 crc kubenswrapper[4900]: I0127 13:45:02.468408 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" event={"ID":"a213cc66-7a29-4ebc-99ec-5f609e067565","Type":"ContainerDied","Data":"a3c94e55c1742872a83021e458408f3efec687f172c05b75643f5fb839469e3a"} Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.018038 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.069766 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a213cc66-7a29-4ebc-99ec-5f609e067565-config-volume\") pod \"a213cc66-7a29-4ebc-99ec-5f609e067565\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.069858 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlj8j\" (UniqueName: \"kubernetes.io/projected/a213cc66-7a29-4ebc-99ec-5f609e067565-kube-api-access-rlj8j\") pod \"a213cc66-7a29-4ebc-99ec-5f609e067565\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.070161 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a213cc66-7a29-4ebc-99ec-5f609e067565-secret-volume\") pod \"a213cc66-7a29-4ebc-99ec-5f609e067565\" (UID: \"a213cc66-7a29-4ebc-99ec-5f609e067565\") " Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.073373 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a213cc66-7a29-4ebc-99ec-5f609e067565-config-volume" (OuterVolumeSpecName: "config-volume") pod "a213cc66-7a29-4ebc-99ec-5f609e067565" (UID: "a213cc66-7a29-4ebc-99ec-5f609e067565"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.080395 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a213cc66-7a29-4ebc-99ec-5f609e067565-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a213cc66-7a29-4ebc-99ec-5f609e067565" (UID: "a213cc66-7a29-4ebc-99ec-5f609e067565"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.080605 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a213cc66-7a29-4ebc-99ec-5f609e067565-kube-api-access-rlj8j" (OuterVolumeSpecName: "kube-api-access-rlj8j") pod "a213cc66-7a29-4ebc-99ec-5f609e067565" (UID: "a213cc66-7a29-4ebc-99ec-5f609e067565"). InnerVolumeSpecName "kube-api-access-rlj8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.174458 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a213cc66-7a29-4ebc-99ec-5f609e067565-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.174546 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlj8j\" (UniqueName: \"kubernetes.io/projected/a213cc66-7a29-4ebc-99ec-5f609e067565-kube-api-access-rlj8j\") on node \"crc\" DevicePath \"\"" Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.174596 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a213cc66-7a29-4ebc-99ec-5f609e067565-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.503513 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.504946 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492025-gll96" event={"ID":"a213cc66-7a29-4ebc-99ec-5f609e067565","Type":"ContainerDied","Data":"c6022b53398512841cac407dcbf26235de9fdc0925cbe0c6e1f15fd23efcd74c"} Jan 27 13:45:04 crc kubenswrapper[4900]: I0127 13:45:04.504993 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6022b53398512841cac407dcbf26235de9fdc0925cbe0c6e1f15fd23efcd74c" Jan 27 13:45:05 crc kubenswrapper[4900]: I0127 13:45:05.131566 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688"] Jan 27 13:45:05 crc kubenswrapper[4900]: I0127 13:45:05.149421 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491980-7r688"] Jan 27 13:45:06 crc kubenswrapper[4900]: I0127 13:45:06.514938 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48f3ebad-795a-44d9-bcfb-26272131254e" path="/var/lib/kubelet/pods/48f3ebad-795a-44d9-bcfb-26272131254e/volumes" Jan 27 13:45:42 crc kubenswrapper[4900]: I0127 13:45:42.800570 4900 scope.go:117] "RemoveContainer" containerID="29e877a26a5da382039a962f9cb8ffc972fed723f8bb5c4582ad700665e53dae" Jan 27 13:46:22 crc kubenswrapper[4900]: I0127 13:46:22.372444 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:46:22 crc kubenswrapper[4900]: I0127 13:46:22.373327 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:46:52 crc kubenswrapper[4900]: I0127 13:46:52.372773 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:46:52 crc kubenswrapper[4900]: I0127 13:46:52.373557 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:47:22 crc kubenswrapper[4900]: I0127 13:47:22.372547 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:47:22 crc kubenswrapper[4900]: I0127 13:47:22.373224 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" 
podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:47:22 crc kubenswrapper[4900]: I0127 13:47:22.373286 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:47:22 crc kubenswrapper[4900]: I0127 13:47:22.374003 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e61aa6c67f9230704f1a0085bb75a6c32087d793e1a8a6664a8e90dda39d1378"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:47:22 crc kubenswrapper[4900]: I0127 13:47:22.374081 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://e61aa6c67f9230704f1a0085bb75a6c32087d793e1a8a6664a8e90dda39d1378" gracePeriod=600 Jan 27 13:47:23 crc kubenswrapper[4900]: I0127 13:47:23.196375 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="e61aa6c67f9230704f1a0085bb75a6c32087d793e1a8a6664a8e90dda39d1378" exitCode=0 Jan 27 13:47:23 crc kubenswrapper[4900]: I0127 13:47:23.196498 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"e61aa6c67f9230704f1a0085bb75a6c32087d793e1a8a6664a8e90dda39d1378"} Jan 27 13:47:23 crc kubenswrapper[4900]: I0127 13:47:23.197095 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"} Jan 27 13:47:23 crc kubenswrapper[4900]: I0127 13:47:23.197126 4900 scope.go:117] "RemoveContainer" containerID="54321ec473e189269c19fdeaa92890f676b5a0c1dd1f9a887818de06217a2ee3" Jan 27 13:47:55 crc kubenswrapper[4900]: I0127 13:47:55.847432 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pdr8g"] Jan 27 13:47:55 crc kubenswrapper[4900]: E0127 13:47:55.849122 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a213cc66-7a29-4ebc-99ec-5f609e067565" containerName="collect-profiles" Jan 27 13:47:55 crc kubenswrapper[4900]: I0127 13:47:55.849139 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a213cc66-7a29-4ebc-99ec-5f609e067565" containerName="collect-profiles" Jan 27 13:47:55 crc kubenswrapper[4900]: I0127 13:47:55.849446 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a213cc66-7a29-4ebc-99ec-5f609e067565" containerName="collect-profiles" Jan 27 13:47:55 crc kubenswrapper[4900]: I0127 13:47:55.852394 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:55 crc kubenswrapper[4900]: I0127 13:47:55.863645 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pdr8g"] Jan 27 13:47:55 crc kubenswrapper[4900]: I0127 13:47:55.927998 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-catalog-content\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:55 crc kubenswrapper[4900]: I0127 13:47:55.928105 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-utilities\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:55 crc kubenswrapper[4900]: I0127 13:47:55.930123 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6wm9\" (UniqueName: \"kubernetes.io/projected/8794b46d-a7b3-40e5-9376-104ac531dfe1-kube-api-access-q6wm9\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:56 crc kubenswrapper[4900]: I0127 13:47:56.032526 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6wm9\" (UniqueName: \"kubernetes.io/projected/8794b46d-a7b3-40e5-9376-104ac531dfe1-kube-api-access-q6wm9\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:56 crc kubenswrapper[4900]: I0127 13:47:56.032746 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-catalog-content\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:56 crc kubenswrapper[4900]: I0127 13:47:56.032783 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-utilities\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:56 crc kubenswrapper[4900]: I0127 13:47:56.033733 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-catalog-content\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:56 crc kubenswrapper[4900]: I0127 13:47:56.033772 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-utilities\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:56 crc kubenswrapper[4900]: I0127 13:47:56.097953 4900 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-q6wm9\" (UniqueName: \"kubernetes.io/projected/8794b46d-a7b3-40e5-9376-104ac531dfe1-kube-api-access-q6wm9\") pod \"redhat-marketplace-pdr8g\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:56 crc kubenswrapper[4900]: I0127 13:47:56.186665 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:47:56 crc kubenswrapper[4900]: I0127 13:47:56.858704 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pdr8g"] Jan 27 13:47:57 crc kubenswrapper[4900]: I0127 13:47:57.688189 4900 generic.go:334] "Generic (PLEG): container finished" podID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerID="659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531" exitCode=0 Jan 27 13:47:57 crc kubenswrapper[4900]: I0127 13:47:57.688402 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pdr8g" event={"ID":"8794b46d-a7b3-40e5-9376-104ac531dfe1","Type":"ContainerDied","Data":"659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531"} Jan 27 13:47:57 crc kubenswrapper[4900]: I0127 13:47:57.688621 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pdr8g" event={"ID":"8794b46d-a7b3-40e5-9376-104ac531dfe1","Type":"ContainerStarted","Data":"744f75fa1b325c90faa5d91febc66dfd85b22fbd0ebf1af971efd446599a31d1"} Jan 27 13:47:57 crc kubenswrapper[4900]: I0127 13:47:57.693254 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 13:47:59 crc kubenswrapper[4900]: I0127 13:47:59.731185 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pdr8g" event={"ID":"8794b46d-a7b3-40e5-9376-104ac531dfe1","Type":"ContainerStarted","Data":"3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6"} Jan 27 13:48:01 crc kubenswrapper[4900]: I0127 13:48:01.757952 4900 generic.go:334] "Generic (PLEG): container finished" podID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerID="3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6" exitCode=0 Jan 27 13:48:01 crc kubenswrapper[4900]: I0127 13:48:01.758031 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pdr8g" event={"ID":"8794b46d-a7b3-40e5-9376-104ac531dfe1","Type":"ContainerDied","Data":"3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6"} Jan 27 13:48:02 crc kubenswrapper[4900]: I0127 13:48:02.791984 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pdr8g" event={"ID":"8794b46d-a7b3-40e5-9376-104ac531dfe1","Type":"ContainerStarted","Data":"9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4"} Jan 27 13:48:02 crc kubenswrapper[4900]: I0127 13:48:02.834222 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pdr8g" podStartSLOduration=3.073696708 podStartE2EDuration="7.834190243s" podCreationTimestamp="2026-01-27 13:47:55 +0000 UTC" firstStartedPulling="2026-01-27 13:47:57.692854719 +0000 UTC m=+4904.929882929" lastFinishedPulling="2026-01-27 13:48:02.453348254 +0000 UTC m=+4909.690376464" observedRunningTime="2026-01-27 13:48:02.819769258 +0000 UTC m=+4910.056797488" watchObservedRunningTime="2026-01-27 13:48:02.834190243 +0000 UTC 
m=+4910.071218453" Jan 27 13:48:06 crc kubenswrapper[4900]: I0127 13:48:06.187596 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:48:06 crc kubenswrapper[4900]: I0127 13:48:06.188364 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:48:06 crc kubenswrapper[4900]: I0127 13:48:06.243022 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:48:16 crc kubenswrapper[4900]: I0127 13:48:16.244402 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:48:16 crc kubenswrapper[4900]: I0127 13:48:16.321216 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pdr8g"] Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.089218 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pdr8g" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerName="registry-server" containerID="cri-o://9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4" gracePeriod=2 Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.833013 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.940561 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6wm9\" (UniqueName: \"kubernetes.io/projected/8794b46d-a7b3-40e5-9376-104ac531dfe1-kube-api-access-q6wm9\") pod \"8794b46d-a7b3-40e5-9376-104ac531dfe1\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.941895 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-utilities\") pod \"8794b46d-a7b3-40e5-9376-104ac531dfe1\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.941992 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-catalog-content\") pod \"8794b46d-a7b3-40e5-9376-104ac531dfe1\" (UID: \"8794b46d-a7b3-40e5-9376-104ac531dfe1\") " Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.943351 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-utilities" (OuterVolumeSpecName: "utilities") pod "8794b46d-a7b3-40e5-9376-104ac531dfe1" (UID: "8794b46d-a7b3-40e5-9376-104ac531dfe1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.944076 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.975131 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8794b46d-a7b3-40e5-9376-104ac531dfe1" (UID: "8794b46d-a7b3-40e5-9376-104ac531dfe1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:48:17 crc kubenswrapper[4900]: I0127 13:48:17.991280 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8794b46d-a7b3-40e5-9376-104ac531dfe1-kube-api-access-q6wm9" (OuterVolumeSpecName: "kube-api-access-q6wm9") pod "8794b46d-a7b3-40e5-9376-104ac531dfe1" (UID: "8794b46d-a7b3-40e5-9376-104ac531dfe1"). InnerVolumeSpecName "kube-api-access-q6wm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.047031 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6wm9\" (UniqueName: \"kubernetes.io/projected/8794b46d-a7b3-40e5-9376-104ac531dfe1-kube-api-access-q6wm9\") on node \"crc\" DevicePath \"\"" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.047092 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8794b46d-a7b3-40e5-9376-104ac531dfe1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.105157 4900 generic.go:334] "Generic (PLEG): container finished" podID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerID="9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4" exitCode=0 Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.105242 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pdr8g" event={"ID":"8794b46d-a7b3-40e5-9376-104ac531dfe1","Type":"ContainerDied","Data":"9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4"} Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.105297 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pdr8g" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.105339 4900 scope.go:117] "RemoveContainer" containerID="9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.105316 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pdr8g" event={"ID":"8794b46d-a7b3-40e5-9376-104ac531dfe1","Type":"ContainerDied","Data":"744f75fa1b325c90faa5d91febc66dfd85b22fbd0ebf1af971efd446599a31d1"} Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.146464 4900 scope.go:117] "RemoveContainer" containerID="3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.155226 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pdr8g"] Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.167351 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pdr8g"] Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.181204 4900 scope.go:117] "RemoveContainer" containerID="659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.270588 4900 scope.go:117] "RemoveContainer" containerID="9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4" Jan 27 13:48:18 crc kubenswrapper[4900]: E0127 13:48:18.271872 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4\": container with ID starting with 9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4 not found: ID does not exist" containerID="9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.271975 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4"} err="failed to get container status \"9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4\": rpc error: code = NotFound desc = could not find container \"9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4\": container with ID starting with 9f0c245896e9e5ac3fd3051b180b2e7881027814f9f41116b2c296bd5f0dbbc4 not found: ID does not exist" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.272107 4900 scope.go:117] "RemoveContainer" containerID="3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6" Jan 27 13:48:18 crc kubenswrapper[4900]: E0127 13:48:18.272632 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6\": container with ID starting with 3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6 not found: ID does not exist" containerID="3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.272710 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6"} err="failed to get container status \"3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6\": rpc error: code = NotFound desc = could not find 
container \"3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6\": container with ID starting with 3e19bce2e0adbe12d019ecbf4eb8059c5a49b94157d39471fef181c1564750d6 not found: ID does not exist" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.272754 4900 scope.go:117] "RemoveContainer" containerID="659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531" Jan 27 13:48:18 crc kubenswrapper[4900]: E0127 13:48:18.273180 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531\": container with ID starting with 659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531 not found: ID does not exist" containerID="659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.273226 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531"} err="failed to get container status \"659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531\": rpc error: code = NotFound desc = could not find container \"659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531\": container with ID starting with 659a97b424d1606b6572b601472e6a0af34687f06ac717c138f5a2f38454c531 not found: ID does not exist" Jan 27 13:48:18 crc kubenswrapper[4900]: I0127 13:48:18.498680 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" path="/var/lib/kubelet/pods/8794b46d-a7b3-40e5-9376-104ac531dfe1/volumes" Jan 27 13:49:22 crc kubenswrapper[4900]: I0127 13:49:22.373180 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:49:22 crc kubenswrapper[4900]: I0127 13:49:22.376337 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:49:52 crc kubenswrapper[4900]: I0127 13:49:52.373160 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:49:52 crc kubenswrapper[4900]: I0127 13:49:52.374155 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:50:19 crc kubenswrapper[4900]: I0127 13:50:19.728316 4900 trace.go:236] Trace[1587994854]: "Calculate volume metrics of prometheus-metric-storage-db for pod openstack/prometheus-metric-storage-0" (27-Jan-2026 13:50:15.537) (total time: 4190ms): Jan 27 13:50:19 crc kubenswrapper[4900]: Trace[1587994854]: [4.190773875s] [4.190773875s] END Jan 27 13:50:19 
crc kubenswrapper[4900]: I0127 13:50:19.737564 4900 trace.go:236] Trace[1259804614]: "Calculate volume metrics of ovndbcluster-sb-etc-ovn for pod openstack/ovsdbserver-sb-0" (27-Jan-2026 13:50:10.646) (total time: 9091ms): Jan 27 13:50:19 crc kubenswrapper[4900]: Trace[1259804614]: [9.091244133s] [9.091244133s] END Jan 27 13:50:22 crc kubenswrapper[4900]: I0127 13:50:22.372687 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:50:22 crc kubenswrapper[4900]: I0127 13:50:22.373502 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:50:22 crc kubenswrapper[4900]: I0127 13:50:22.373560 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:50:22 crc kubenswrapper[4900]: I0127 13:50:22.374882 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:50:22 crc kubenswrapper[4900]: I0127 13:50:22.374936 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" gracePeriod=600 Jan 27 13:50:22 crc kubenswrapper[4900]: I0127 13:50:22.704014 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" exitCode=0 Jan 27 13:50:22 crc kubenswrapper[4900]: I0127 13:50:22.704079 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"} Jan 27 13:50:22 crc kubenswrapper[4900]: I0127 13:50:22.704617 4900 scope.go:117] "RemoveContainer" containerID="e61aa6c67f9230704f1a0085bb75a6c32087d793e1a8a6664a8e90dda39d1378" Jan 27 13:50:23 crc kubenswrapper[4900]: E0127 13:50:23.121304 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:50:23 crc kubenswrapper[4900]: I0127 13:50:23.722338 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 
13:50:23 crc kubenswrapper[4900]: E0127 13:50:23.723519 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:50:35 crc kubenswrapper[4900]: I0127 13:50:35.483296 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:50:35 crc kubenswrapper[4900]: E0127 13:50:35.484592 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:50:46 crc kubenswrapper[4900]: I0127 13:50:46.496864 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:50:46 crc kubenswrapper[4900]: E0127 13:50:46.498914 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:50:51 crc kubenswrapper[4900]: I0127 13:50:51.913947 4900 trace.go:236] Trace[953497152]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-server-2" (27-Jan-2026 13:50:50.808) (total time: 1105ms):
Jan 27 13:50:51 crc kubenswrapper[4900]: Trace[953497152]: [1.105266672s] [1.105266672s] END
Jan 27 13:50:59 crc kubenswrapper[4900]: I0127 13:50:59.482216 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:50:59 crc kubenswrapper[4900]: E0127 13:50:59.483384 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:51:10 crc kubenswrapper[4900]: I0127 13:51:10.483189 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:51:10 crc kubenswrapper[4900]: E0127 13:51:10.484283 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
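The repeated "back-off 5m0s" errors above are kubelet's crash-loop restart backoff at its ceiling for machine-config-daemon; the roughly 10-12 second cadence of the messages is the pod worker re-evaluating the pod and finding the backoff window still open, not fresh restart attempts. A minimal Go sketch of that policy, assuming kubelet's usual defaults of a 10s base delay, factor-2 growth, and a 5m cap (illustrative constants, not values read from this cluster's configuration):

package main

import (
	"fmt"
	"time"
)

// restartDelay mirrors the shape of kubelet's crash-loop backoff: a base
// delay that doubles per failed restart up to a fixed ceiling. Constants
// are assumed defaults, shown only to explain the "back-off 5m0s" text.
func restartDelay(restarts int) time.Duration {
	const (
		base     = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	d := base
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for r := 0; r <= 6; r++ {
		fmt.Printf("after %d failed restarts: wait %v\n", r, restartDelay(r))
	}
	// after 0: 10s, 1: 20s, 2: 40s, 3: 1m20s, 4: 2m40s, 5+: 5m0s — the
	// "back-off 5m0s" ceiling reported in the entries above.
}

Kubelet also resets this backoff once a container has run cleanly for a while (ten minutes by default, as far as I know), which is why a single successful liveness cycle would have stopped this stream of errors.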
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.187007 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"]
Jan 27 13:51:21 crc kubenswrapper[4900]: E0127 13:51:21.188673 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerName="registry-server"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.188694 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerName="registry-server"
Jan 27 13:51:21 crc kubenswrapper[4900]: E0127 13:51:21.188726 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerName="extract-content"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.188734 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerName="extract-content"
Jan 27 13:51:21 crc kubenswrapper[4900]: E0127 13:51:21.188788 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerName="extract-utilities"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.188795 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerName="extract-utilities"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.189152 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="8794b46d-a7b3-40e5-9376-104ac531dfe1" containerName="registry-server"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.190431 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.194939 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.195102 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.194957 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-x4bq2"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.195325 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.203972 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.327253 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.327319 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest"
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.327353 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: 
\"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.327372 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.327895 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.328221 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.328389 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.328819 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-config-data\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.329212 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbsds\" (UniqueName: \"kubernetes.io/projected/ac88ca80-18bc-417b-8a7d-5ca2666524e3-kube-api-access-jbsds\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.431873 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.431953 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.432008 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: 
\"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.432037 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.432171 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.432277 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.432322 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.432430 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-config-data\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.432537 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbsds\" (UniqueName: \"kubernetes.io/projected/ac88ca80-18bc-417b-8a7d-5ca2666524e3-kube-api-access-jbsds\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.434270 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.434389 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.435046 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-temporary\") pod 
\"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.437599 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.443597 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.445461 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.450754 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.455040 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-config-data\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.460848 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbsds\" (UniqueName: \"kubernetes.io/projected/ac88ca80-18bc-417b-8a7d-5ca2666524e3-kube-api-access-jbsds\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.519240 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") " pod="openstack/tempest-tests-tempest" Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.825759 4900 util.go:30] "No sandbox for pod can be found. 
Jan 27 13:51:21 crc kubenswrapper[4900]: I0127 13:51:21.825759 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 27 13:51:22 crc kubenswrapper[4900]: I0127 13:51:22.532838 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Jan 27 13:51:23 crc kubenswrapper[4900]: I0127 13:51:23.547712 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"ac88ca80-18bc-417b-8a7d-5ca2666524e3","Type":"ContainerStarted","Data":"949db1039723ef32c0e17242a545386ea87041ce1b548624149b1e4ed2a9de0f"}
Jan 27 13:51:25 crc kubenswrapper[4900]: I0127 13:51:25.483832 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:51:25 crc kubenswrapper[4900]: E0127 13:51:25.485260 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:51:36 crc kubenswrapper[4900]: I0127 13:51:36.483585 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:51:36 crc kubenswrapper[4900]: E0127 13:51:36.489694 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:51:48 crc kubenswrapper[4900]: I0127 13:51:48.482768 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:51:48 crc kubenswrapper[4900]: E0127 13:51:48.483852 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:52:01 crc kubenswrapper[4900]: I0127 13:52:01.482304 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:52:01 crc kubenswrapper[4900]: E0127 13:52:01.483202 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:52:12 crc kubenswrapper[4900]: I0127 13:52:12.482127 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:52:12 crc kubenswrapper[4900]: E0127 13:52:12.483016 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.112680 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bngd7"] Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.116679 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.146950 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bngd7"] Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.252395 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-catalog-content\") pod \"redhat-operators-bngd7\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.252815 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-utilities\") pod \"redhat-operators-bngd7\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.252865 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxwf4\" (UniqueName: \"kubernetes.io/projected/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-kube-api-access-xxwf4\") pod \"redhat-operators-bngd7\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.356149 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxwf4\" (UniqueName: \"kubernetes.io/projected/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-kube-api-access-xxwf4\") pod \"redhat-operators-bngd7\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.356454 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-catalog-content\") pod \"redhat-operators-bngd7\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.356506 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-utilities\") pod \"redhat-operators-bngd7\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.357141 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-utilities\") pod \"redhat-operators-bngd7\" (UID: 
\"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.357716 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-catalog-content\") pod \"redhat-operators-bngd7\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.390135 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxwf4\" (UniqueName: \"kubernetes.io/projected/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-kube-api-access-xxwf4\") pod \"redhat-operators-bngd7\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:16 crc kubenswrapper[4900]: I0127 13:52:16.455833 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:23 crc kubenswrapper[4900]: I0127 13:52:23.482931 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:52:23 crc kubenswrapper[4900]: E0127 13:52:23.484363 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:52:26 crc kubenswrapper[4900]: E0127 13:52:26.185276 4900 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Jan 27 13:52:26 crc kubenswrapper[4900]: E0127 13:52:26.192368 4900 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jbsds,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(ac88ca80-18bc-417b-8a7d-5ca2666524e3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 27 13:52:26 crc kubenswrapper[4900]: E0127 13:52:26.193612 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="ac88ca80-18bc-417b-8a7d-5ca2666524e3" Jan 27 13:52:26 crc kubenswrapper[4900]: E0127 13:52:26.539532 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="ac88ca80-18bc-417b-8a7d-5ca2666524e3" Jan 27 13:52:27 crc kubenswrapper[4900]: I0127 13:52:27.974783 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bngd7"] Jan 27 13:52:28 crc kubenswrapper[4900]: I0127 13:52:28.679072 4900 generic.go:334] "Generic (PLEG): container finished" podID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerID="a35c26b5c90bfcd044062ae5637eedba55e1f06b256e26859d5f1b397fda0076" exitCode=0 Jan 27 13:52:28 crc kubenswrapper[4900]: I0127 13:52:28.679437 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerDied","Data":"a35c26b5c90bfcd044062ae5637eedba55e1f06b256e26859d5f1b397fda0076"} Jan 27 13:52:28 crc kubenswrapper[4900]: I0127 13:52:28.679472 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerStarted","Data":"99695c4734d150f4b9b43128d23095a70ddb29428e6e542405b90992d08d5386"} Jan 27 13:52:31 crc kubenswrapper[4900]: I0127 13:52:31.721084 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerStarted","Data":"cb571937f332df49c21e8733c046378c8a22fc86e36efb696ce13d1aa746660a"} Jan 27 13:52:36 crc kubenswrapper[4900]: I0127 13:52:36.492734 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:52:36 crc kubenswrapper[4900]: E0127 13:52:36.497474 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:52:44 crc kubenswrapper[4900]: I0127 13:52:44.440298 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 27 13:52:48 crc kubenswrapper[4900]: E0127 13:52:48.292366 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80fbf804_7171_46c3_b2dc_8a01b3bddb9c.slice/crio-cb571937f332df49c21e8733c046378c8a22fc86e36efb696ce13d1aa746660a.scope\": RecentStats: unable to find data in memory cache]" Jan 27 13:52:48 crc kubenswrapper[4900]: E0127 13:52:48.292706 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80fbf804_7171_46c3_b2dc_8a01b3bddb9c.slice/crio-conmon-cb571937f332df49c21e8733c046378c8a22fc86e36efb696ce13d1aa746660a.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80fbf804_7171_46c3_b2dc_8a01b3bddb9c.slice/crio-cb571937f332df49c21e8733c046378c8a22fc86e36efb696ce13d1aa746660a.scope\": RecentStats: unable to find data in memory cache]" Jan 27 13:52:48 crc kubenswrapper[4900]: I0127 13:52:48.958286 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"ac88ca80-18bc-417b-8a7d-5ca2666524e3","Type":"ContainerStarted","Data":"e851972b44f9ab9caa5895feb925588891622524f8b9ed75f3defccb1240c4be"} Jan 27 13:52:48 crc kubenswrapper[4900]: I0127 13:52:48.963287 4900 generic.go:334] "Generic (PLEG): container finished" podID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerID="cb571937f332df49c21e8733c046378c8a22fc86e36efb696ce13d1aa746660a" exitCode=0 Jan 27 13:52:48 crc kubenswrapper[4900]: I0127 13:52:48.963385 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerDied","Data":"cb571937f332df49c21e8733c046378c8a22fc86e36efb696ce13d1aa746660a"} Jan 27 13:52:49 crc kubenswrapper[4900]: I0127 13:52:49.017116 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=7.119205205 podStartE2EDuration="1m29.017071933s" podCreationTimestamp="2026-01-27 13:51:20 +0000 UTC" firstStartedPulling="2026-01-27 13:51:22.538433843 +0000 UTC m=+5109.775462053" lastFinishedPulling="2026-01-27 13:52:44.436300571 +0000 UTC m=+5191.673328781" observedRunningTime="2026-01-27 13:52:48.985968784 +0000 UTC m=+5196.222996994" watchObservedRunningTime="2026-01-27 13:52:49.017071933 +0000 UTC m=+5196.254100153" Jan 27 13:52:49 crc kubenswrapper[4900]: I0127 13:52:49.483154 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:52:49 crc kubenswrapper[4900]: E0127 13:52:49.483993 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:52:50 crc kubenswrapper[4900]: I0127 13:52:50.992628 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerStarted","Data":"cdfd6bf02e7c5c95c061fc91e5180e7eec71cd2f5ed33a4277ce0bd6600b4f30"} Jan 27 13:52:51 crc kubenswrapper[4900]: I0127 13:52:51.024366 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bngd7" podStartSLOduration=13.854974993 podStartE2EDuration="35.024343358s" podCreationTimestamp="2026-01-27 13:52:16 +0000 UTC" firstStartedPulling="2026-01-27 13:52:28.681679135 +0000 UTC m=+5175.918707345" lastFinishedPulling="2026-01-27 13:52:49.8510475 +0000 UTC m=+5197.088075710" observedRunningTime="2026-01-27 13:52:51.015703878 +0000 UTC m=+5198.252732088" watchObservedRunningTime="2026-01-27 13:52:51.024343358 +0000 UTC m=+5198.261371568" Jan 27 13:52:56 crc kubenswrapper[4900]: I0127 13:52:56.458290 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bngd7" Jan 
27 13:52:56 crc kubenswrapper[4900]: I0127 13:52:56.459395 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:52:57 crc kubenswrapper[4900]: I0127 13:52:57.527832 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:52:57 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:52:57 crc kubenswrapper[4900]: > Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.352897 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6bb7k"] Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.357002 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.372402 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6bb7k"] Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.548623 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcx59\" (UniqueName: \"kubernetes.io/projected/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-kube-api-access-dcx59\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.548872 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-catalog-content\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.549486 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-utilities\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.652679 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcx59\" (UniqueName: \"kubernetes.io/projected/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-kube-api-access-dcx59\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.652838 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-catalog-content\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.655272 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-utilities\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " 
pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.655487 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-catalog-content\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.656168 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-utilities\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.685328 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcx59\" (UniqueName: \"kubernetes.io/projected/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-kube-api-access-dcx59\") pod \"certified-operators-6bb7k\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") " pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:52:59 crc kubenswrapper[4900]: I0127 13:52:59.986244 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:53:00 crc kubenswrapper[4900]: I0127 13:53:00.485344 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:53:00 crc kubenswrapper[4900]: E0127 13:53:00.486033 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:53:00 crc kubenswrapper[4900]: I0127 13:53:00.688531 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6bb7k"] Jan 27 13:53:01 crc kubenswrapper[4900]: I0127 13:53:01.143597 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bb7k" event={"ID":"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2","Type":"ContainerStarted","Data":"d9e8ad4cfea68de4786d2b2c1a028befd6b26053c950a5c5d1e2bb38b85680ec"} Jan 27 13:53:01 crc kubenswrapper[4900]: I0127 13:53:01.146455 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bb7k" event={"ID":"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2","Type":"ContainerStarted","Data":"22edfb27e7cd5ca880bd98eaa63bfa2f210859b095b8775b94bcdff5f3d0695b"} Jan 27 13:53:01 crc kubenswrapper[4900]: I0127 13:53:01.147741 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 13:53:02 crc kubenswrapper[4900]: I0127 13:53:02.162477 4900 generic.go:334] "Generic (PLEG): container finished" podID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerID="d9e8ad4cfea68de4786d2b2c1a028befd6b26053c950a5c5d1e2bb38b85680ec" exitCode=0 Jan 27 13:53:02 crc kubenswrapper[4900]: I0127 13:53:02.162589 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bb7k" 
event={"ID":"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2","Type":"ContainerDied","Data":"d9e8ad4cfea68de4786d2b2c1a028befd6b26053c950a5c5d1e2bb38b85680ec"} Jan 27 13:53:04 crc kubenswrapper[4900]: I0127 13:53:04.201747 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bb7k" event={"ID":"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2","Type":"ContainerStarted","Data":"47b08cd4bf4647bd2ac8b43efa3d35691e18903a822048405da9f95f88648107"} Jan 27 13:53:07 crc kubenswrapper[4900]: I0127 13:53:07.523095 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:53:07 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:53:07 crc kubenswrapper[4900]: > Jan 27 13:53:12 crc kubenswrapper[4900]: I0127 13:53:12.483517 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:53:12 crc kubenswrapper[4900]: E0127 13:53:12.484657 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:53:13 crc kubenswrapper[4900]: I0127 13:53:13.354118 4900 generic.go:334] "Generic (PLEG): container finished" podID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerID="47b08cd4bf4647bd2ac8b43efa3d35691e18903a822048405da9f95f88648107" exitCode=0 Jan 27 13:53:13 crc kubenswrapper[4900]: I0127 13:53:13.354555 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bb7k" event={"ID":"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2","Type":"ContainerDied","Data":"47b08cd4bf4647bd2ac8b43efa3d35691e18903a822048405da9f95f88648107"} Jan 27 13:53:15 crc kubenswrapper[4900]: I0127 13:53:15.391624 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bb7k" event={"ID":"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2","Type":"ContainerStarted","Data":"6073089f3460db85e25b7c27858cd0a3148c3a7888f36c410ac3785d0ba24dd0"} Jan 27 13:53:15 crc kubenswrapper[4900]: I0127 13:53:15.423361 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6bb7k" podStartSLOduration=2.913982416 podStartE2EDuration="16.423340188s" podCreationTimestamp="2026-01-27 13:52:59 +0000 UTC" firstStartedPulling="2026-01-27 13:53:01.147371247 +0000 UTC m=+5208.384399457" lastFinishedPulling="2026-01-27 13:53:14.656729019 +0000 UTC m=+5221.893757229" observedRunningTime="2026-01-27 13:53:15.420141165 +0000 UTC m=+5222.657169395" watchObservedRunningTime="2026-01-27 13:53:15.423340188 +0000 UTC m=+5222.660368398" Jan 27 13:53:17 crc kubenswrapper[4900]: I0127 13:53:17.522170 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:53:17 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:53:17 crc kubenswrapper[4900]: > Jan 27 13:53:19 crc kubenswrapper[4900]: 
I0127 13:53:19.988298 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6bb7k"
Jan 27 13:53:19 crc kubenswrapper[4900]: I0127 13:53:19.988724 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6bb7k"
Jan 27 13:53:21 crc kubenswrapper[4900]: I0127 13:53:21.051594 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-6bb7k" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="registry-server" probeResult="failure" output=<
Jan 27 13:53:21 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 13:53:21 crc kubenswrapper[4900]: >
Jan 27 13:53:27 crc kubenswrapper[4900]: I0127 13:53:27.482910 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac"
Jan 27 13:53:27 crc kubenswrapper[4900]: E0127 13:53:27.484003 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 13:53:27 crc kubenswrapper[4900]: I0127 13:53:27.513127 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=<
Jan 27 13:53:27 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 13:53:27 crc kubenswrapper[4900]: >
Jan 27 13:53:30 crc kubenswrapper[4900]: I0127 13:53:30.146235 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6bb7k"
Jan 27 13:53:30 crc kubenswrapper[4900]: I0127 13:53:30.208992 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6bb7k"
Jan 27 13:53:30 crc kubenswrapper[4900]: I0127 13:53:30.583615 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6bb7k"]
Jan 27 13:53:31 crc kubenswrapper[4900]: I0127 13:53:31.629239 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6bb7k" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="registry-server" containerID="cri-o://6073089f3460db85e25b7c27858cd0a3148c3a7888f36c410ac3785d0ba24dd0" gracePeriod=2
Jan 27 13:53:32 crc kubenswrapper[4900]: I0127 13:53:32.651662 4900 generic.go:334] "Generic (PLEG): container finished" podID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerID="6073089f3460db85e25b7c27858cd0a3148c3a7888f36c410ac3785d0ba24dd0" exitCode=0
Jan 27 13:53:32 crc kubenswrapper[4900]: I0127 13:53:32.651742 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bb7k" event={"ID":"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2","Type":"ContainerDied","Data":"6073089f3460db85e25b7c27858cd0a3148c3a7888f36c410ac3785d0ba24dd0"}
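The failing startup probes above, with output "timeout: failed to connect service \":50051\" within 1s", come from the catalog pods' registry-server containers; the message format looks like grpc_health_probe checking the registry's gRPC health endpoint on port 50051 until the catalog content has loaded. A rough standalone equivalent in Go, under stated assumptions (the target address is illustrative, the 1s budget is the timeout visible in the log, and the server is assumed to implement the standard gRPC health service):

package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	addr := "127.0.0.1:50051" // assumption: in-cluster the probe targets the pod's own :50051
	ctx, cancel := context.WithTimeout(context.Background(), time.Second) // 1s, per the log output
	defer cancel()

	conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		fmt.Fprintln(os.Stderr, "dial:", err)
		os.Exit(1)
	}
	defer conn.Close()

	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		// A non-zero exit is what the kubelet records as probeResult="failure".
		fmt.Fprintf(os.Stderr, "health check of %s failed: %v\n", addr, err)
		os.Exit(1)
	}
	fmt.Println("status:", resp.GetStatus()) // SERVING once the catalog has loaded
}

A zero exit once Check returns SERVING is what flips the probe to status="started" (and readiness to "ready"), as happens for certified-operators-6bb7k at 13:53:30 above.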
Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.083987 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bb7k"
Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.173456 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-catalog-content\") pod \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") "
Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.174114 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcx59\" (UniqueName: \"kubernetes.io/projected/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-kube-api-access-dcx59\") pod \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") "
Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.174233 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-utilities\") pod \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\" (UID: \"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2\") "
Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.174951 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-utilities" (OuterVolumeSpecName: "utilities") pod "cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" (UID: "cffe25fe-feaf-44db-afd8-5d2ea5b2aef2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.175598 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-utilities\") on node \"crc\" DevicePath \"\""
Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.195665 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-kube-api-access-dcx59" (OuterVolumeSpecName: "kube-api-access-dcx59") pod "cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" (UID: "cffe25fe-feaf-44db-afd8-5d2ea5b2aef2"). InnerVolumeSpecName "kube-api-access-dcx59". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.248040 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" (UID: "cffe25fe-feaf-44db-afd8-5d2ea5b2aef2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.280870 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcx59\" (UniqueName: \"kubernetes.io/projected/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-kube-api-access-dcx59\") on node \"crc\" DevicePath \"\"" Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.280917 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.670310 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bb7k" event={"ID":"cffe25fe-feaf-44db-afd8-5d2ea5b2aef2","Type":"ContainerDied","Data":"22edfb27e7cd5ca880bd98eaa63bfa2f210859b095b8775b94bcdff5f3d0695b"} Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.670411 4900 scope.go:117] "RemoveContainer" containerID="6073089f3460db85e25b7c27858cd0a3148c3a7888f36c410ac3785d0ba24dd0" Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.670421 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bb7k" Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.706333 4900 scope.go:117] "RemoveContainer" containerID="47b08cd4bf4647bd2ac8b43efa3d35691e18903a822048405da9f95f88648107" Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.787353 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6bb7k"] Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.789189 4900 scope.go:117] "RemoveContainer" containerID="d9e8ad4cfea68de4786d2b2c1a028befd6b26053c950a5c5d1e2bb38b85680ec" Jan 27 13:53:33 crc kubenswrapper[4900]: I0127 13:53:33.807513 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6bb7k"] Jan 27 13:53:34 crc kubenswrapper[4900]: I0127 13:53:34.499933 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" path="/var/lib/kubelet/pods/cffe25fe-feaf-44db-afd8-5d2ea5b2aef2/volumes" Jan 27 13:53:37 crc kubenswrapper[4900]: I0127 13:53:37.510495 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:53:37 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:53:37 crc kubenswrapper[4900]: > Jan 27 13:53:40 crc kubenswrapper[4900]: I0127 13:53:40.482373 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:53:40 crc kubenswrapper[4900]: E0127 13:53:40.483482 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:53:47 crc kubenswrapper[4900]: I0127 13:53:47.578335 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" 
podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:53:47 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:53:47 crc kubenswrapper[4900]: > Jan 27 13:53:54 crc kubenswrapper[4900]: I0127 13:53:54.487080 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:53:54 crc kubenswrapper[4900]: E0127 13:53:54.488290 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:53:57 crc kubenswrapper[4900]: I0127 13:53:57.509566 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:53:57 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:53:57 crc kubenswrapper[4900]: > Jan 27 13:54:07 crc kubenswrapper[4900]: I0127 13:54:07.510638 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:54:07 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:54:07 crc kubenswrapper[4900]: > Jan 27 13:54:08 crc kubenswrapper[4900]: I0127 13:54:08.482703 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:54:08 crc kubenswrapper[4900]: E0127 13:54:08.483578 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:54:17 crc kubenswrapper[4900]: I0127 13:54:17.531232 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:54:17 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:54:17 crc kubenswrapper[4900]: > Jan 27 13:54:17 crc kubenswrapper[4900]: I0127 13:54:17.986402 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-chtfl"] Jan 27 13:54:17 crc kubenswrapper[4900]: E0127 13:54:17.999091 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="extract-content" Jan 27 13:54:17 crc kubenswrapper[4900]: I0127 13:54:17.999169 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="extract-content" Jan 27 13:54:17 crc kubenswrapper[4900]: E0127 13:54:17.999339 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="registry-server" Jan 27 13:54:17 crc kubenswrapper[4900]: I0127 13:54:17.999352 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="registry-server" Jan 27 13:54:17 crc kubenswrapper[4900]: E0127 13:54:17.999376 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="extract-utilities" Jan 27 13:54:17 crc kubenswrapper[4900]: I0127 13:54:17.999386 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="extract-utilities" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.002302 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="cffe25fe-feaf-44db-afd8-5d2ea5b2aef2" containerName="registry-server" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.009721 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.150727 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-chtfl"] Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.217015 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-catalog-content\") pod \"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.217837 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-utilities\") pod \"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.218119 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd5pz\" (UniqueName: \"kubernetes.io/projected/ac68471d-4581-477c-bcff-415b8f8ea21e-kube-api-access-cd5pz\") pod \"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.321331 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-catalog-content\") pod \"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.321590 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-utilities\") pod \"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.321673 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd5pz\" (UniqueName: \"kubernetes.io/projected/ac68471d-4581-477c-bcff-415b8f8ea21e-kube-api-access-cd5pz\") pod 
\"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.332457 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-catalog-content\") pod \"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.334729 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-utilities\") pod \"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.487322 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd5pz\" (UniqueName: \"kubernetes.io/projected/ac68471d-4581-477c-bcff-415b8f8ea21e-kube-api-access-cd5pz\") pod \"community-operators-chtfl\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:18 crc kubenswrapper[4900]: I0127 13:54:18.716039 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:54:21 crc kubenswrapper[4900]: I0127 13:54:21.491196 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:54:21 crc kubenswrapper[4900]: E0127 13:54:21.492826 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:54:21 crc kubenswrapper[4900]: I0127 13:54:21.980542 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-chtfl"] Jan 27 13:54:22 crc kubenswrapper[4900]: I0127 13:54:22.379824 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerStarted","Data":"29fc97149564c3a61fc7c96326999df854b121850e0cc1566b35b236b71c83b1"} Jan 27 13:54:23 crc kubenswrapper[4900]: I0127 13:54:23.425245 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerDied","Data":"b5dc958ed45ebf020fa83bfe0b2439409af357c1c5f7d4fdcccc3d65c2bf717e"} Jan 27 13:54:23 crc kubenswrapper[4900]: I0127 13:54:23.427403 4900 generic.go:334] "Generic (PLEG): container finished" podID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerID="b5dc958ed45ebf020fa83bfe0b2439409af357c1c5f7d4fdcccc3d65c2bf717e" exitCode=0 Jan 27 13:54:27 crc kubenswrapper[4900]: I0127 13:54:27.327636 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" 
event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerStarted","Data":"8b78ba1587e8b754259b96a88a9d4221731423045a78c55d9c96c1223bbfefde"} Jan 27 13:54:36 crc kubenswrapper[4900]: I0127 13:54:36.484179 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:54:36 crc kubenswrapper[4900]: E0127 13:54:36.486736 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:54:48 crc kubenswrapper[4900]: I0127 13:54:48.484126 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:54:48 crc kubenswrapper[4900]: E0127 13:54:48.485443 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:55:03 crc kubenswrapper[4900]: I0127 13:55:03.489788 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:55:03 crc kubenswrapper[4900]: E0127 13:55:03.493391 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:55:10 crc kubenswrapper[4900]: I0127 13:55:10.445807 4900 trace.go:236] Trace[1441887393]: "Calculate volume metrics of glance for pod openstack/glance-default-internal-api-0" (27-Jan-2026 13:54:32.594) (total time: 37797ms): Jan 27 13:55:10 crc kubenswrapper[4900]: Trace[1441887393]: [37.797765143s] [37.797765143s] END Jan 27 13:55:10 crc kubenswrapper[4900]: I0127 13:55:10.445836 4900 trace.go:236] Trace[1416068997]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-server-1" (27-Jan-2026 13:54:33.012) (total time: 37389ms): Jan 27 13:55:10 crc kubenswrapper[4900]: Trace[1416068997]: [37.38995471s] [37.38995471s] END Jan 27 13:55:10 crc kubenswrapper[4900]: I0127 13:55:10.445905 4900 trace.go:236] Trace[189286487]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-cell1-server-0" (27-Jan-2026 13:54:54.006) (total time: 16385ms): Jan 27 13:55:10 crc kubenswrapper[4900]: Trace[189286487]: [16.38542119s] [16.38542119s] END Jan 27 13:55:10 crc kubenswrapper[4900]: I0127 13:55:10.451142 4900 trace.go:236] Trace[1108601123]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-7xgpz" (27-Jan-2026 13:54:52.193) (total time: 18200ms): Jan 27 13:55:10 crc kubenswrapper[4900]: Trace[1108601123]: [18.200102666s] [18.200102666s] END Jan 27 13:55:10 crc kubenswrapper[4900]: 
I0127 13:55:10.445922 4900 trace.go:236] Trace[784495023]: "Calculate volume metrics of mysql-db for pod openstack/openstack-cell1-galera-0" (27-Jan-2026 13:54:50.949) (total time: 19485ms): Jan 27 13:55:10 crc kubenswrapper[4900]: Trace[784495023]: [19.485465564s] [19.485465564s] END Jan 27 13:55:10 crc kubenswrapper[4900]: I0127 13:55:10.451198 4900 trace.go:236] Trace[635687282]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-ingester-0" (27-Jan-2026 13:55:07.494) (total time: 2899ms): Jan 27 13:55:10 crc kubenswrapper[4900]: Trace[635687282]: [2.899599008s] [2.899599008s] END Jan 27 13:55:10 crc kubenswrapper[4900]: I0127 13:55:10.445826 4900 trace.go:236] Trace[912493685]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-server-2" (27-Jan-2026 13:54:42.417) (total time: 27975ms): Jan 27 13:55:10 crc kubenswrapper[4900]: Trace[912493685]: [27.975981324s] [27.975981324s] END Jan 27 13:55:10 crc kubenswrapper[4900]: I0127 13:55:10.461478 4900 trace.go:236] Trace[231846211]: "Calculate volume metrics of prometheus-metric-storage-db for pod openstack/prometheus-metric-storage-0" (27-Jan-2026 13:54:57.246) (total time: 13214ms): Jan 27 13:55:10 crc kubenswrapper[4900]: Trace[231846211]: [13.214876338s] [13.214876338s] END Jan 27 13:55:11 crc kubenswrapper[4900]: I0127 13:55:11.021043 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerDied","Data":"8b78ba1587e8b754259b96a88a9d4221731423045a78c55d9c96c1223bbfefde"} Jan 27 13:55:11 crc kubenswrapper[4900]: I0127 13:55:11.029017 4900 generic.go:334] "Generic (PLEG): container finished" podID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerID="8b78ba1587e8b754259b96a88a9d4221731423045a78c55d9c96c1223bbfefde" exitCode=0 Jan 27 13:55:11 crc kubenswrapper[4900]: I0127 13:55:11.615186 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:55:11 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:55:11 crc kubenswrapper[4900]: > Jan 27 13:55:11 crc kubenswrapper[4900]: I0127 13:55:11.615341 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:55:11 crc kubenswrapper[4900]: I0127 13:55:11.617708 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"cdfd6bf02e7c5c95c061fc91e5180e7eec71cd2f5ed33a4277ce0bd6600b4f30"} pod="openshift-marketplace/redhat-operators-bngd7" containerMessage="Container registry-server failed startup probe, will be restarted" Jan 27 13:55:11 crc kubenswrapper[4900]: I0127 13:55:11.617768 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" containerID="cri-o://cdfd6bf02e7c5c95c061fc91e5180e7eec71cd2f5ed33a4277ce0bd6600b4f30" gracePeriod=30 Jan 27 13:55:13 crc kubenswrapper[4900]: I0127 13:55:13.059586 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" 
event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerStarted","Data":"245103c4351c6a70273942d22366fc9730bdf91d4e6320e709643ddecbfec69d"} Jan 27 13:55:14 crc kubenswrapper[4900]: I0127 13:55:14.135552 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-chtfl" podStartSLOduration=8.553213114 podStartE2EDuration="57.132681292s" podCreationTimestamp="2026-01-27 13:54:17 +0000 UTC" firstStartedPulling="2026-01-27 13:54:23.428892364 +0000 UTC m=+5290.665920574" lastFinishedPulling="2026-01-27 13:55:12.008360542 +0000 UTC m=+5339.245388752" observedRunningTime="2026-01-27 13:55:14.120898601 +0000 UTC m=+5341.357926871" watchObservedRunningTime="2026-01-27 13:55:14.132681292 +0000 UTC m=+5341.369709492" Jan 27 13:55:16 crc kubenswrapper[4900]: I0127 13:55:16.502323 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:55:16 crc kubenswrapper[4900]: E0127 13:55:16.503807 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 13:55:18 crc kubenswrapper[4900]: I0127 13:55:18.716650 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:55:18 crc kubenswrapper[4900]: I0127 13:55:18.717638 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:55:19 crc kubenswrapper[4900]: I0127 13:55:19.772220 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:55:19 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:55:19 crc kubenswrapper[4900]: > Jan 27 13:55:28 crc kubenswrapper[4900]: I0127 13:55:28.502760 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:55:29 crc kubenswrapper[4900]: I0127 13:55:29.414654 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"a204cecd5425db0d7771e4d87ef37003fa244ebdee6545fd60e5e45f7ecd8382"} Jan 27 13:55:29 crc kubenswrapper[4900]: I0127 13:55:29.835909 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:55:29 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:55:29 crc kubenswrapper[4900]: > Jan 27 13:55:39 crc kubenswrapper[4900]: I0127 13:55:39.847463 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:55:39 crc kubenswrapper[4900]: timeout: failed to connect service 
":50051" within 1s Jan 27 13:55:39 crc kubenswrapper[4900]: > Jan 27 13:55:42 crc kubenswrapper[4900]: I0127 13:55:42.660118 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/0.log" Jan 27 13:55:42 crc kubenswrapper[4900]: I0127 13:55:42.668429 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerDied","Data":"cdfd6bf02e7c5c95c061fc91e5180e7eec71cd2f5ed33a4277ce0bd6600b4f30"} Jan 27 13:55:42 crc kubenswrapper[4900]: I0127 13:55:42.671934 4900 generic.go:334] "Generic (PLEG): container finished" podID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerID="cdfd6bf02e7c5c95c061fc91e5180e7eec71cd2f5ed33a4277ce0bd6600b4f30" exitCode=137 Jan 27 13:55:45 crc kubenswrapper[4900]: I0127 13:55:45.730728 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/0.log" Jan 27 13:55:45 crc kubenswrapper[4900]: I0127 13:55:45.745674 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerStarted","Data":"eef20fdab8e817a6d49a6c032c199e2fb80a02b49b4b6b68d9d74c038fb6ecdf"} Jan 27 13:55:46 crc kubenswrapper[4900]: I0127 13:55:46.467411 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:55:46 crc kubenswrapper[4900]: I0127 13:55:46.467949 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:55:47 crc kubenswrapper[4900]: I0127 13:55:47.533432 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:55:47 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:55:47 crc kubenswrapper[4900]: > Jan 27 13:55:49 crc kubenswrapper[4900]: I0127 13:55:49.804237 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:55:49 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:55:49 crc kubenswrapper[4900]: > Jan 27 13:55:57 crc kubenswrapper[4900]: I0127 13:55:57.494654 4900 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.237034482s: [/var/lib/containers/storage/overlay/c5faca893cb6a70fb172b31414e348c9b8d378b9c5231cfae3b5a35af5f88740/diff /var/log/pods/openstack_openstackclient_ded5eaa8-8d7d-4ee2-bad6-62da18024e33/openstackclient/0.log]; will not log again for this container unless duration exceeds 2s Jan 27 13:55:57 crc kubenswrapper[4900]: I0127 13:55:57.517672 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:55:57 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:55:57 crc kubenswrapper[4900]: > Jan 27 13:56:00 crc kubenswrapper[4900]: I0127 13:56:00.452389 4900 
prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:00 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:00 crc kubenswrapper[4900]: > Jan 27 13:56:07 crc kubenswrapper[4900]: I0127 13:56:07.694035 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:07 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:07 crc kubenswrapper[4900]: > Jan 27 13:56:09 crc kubenswrapper[4900]: I0127 13:56:09.783872 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:09 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:09 crc kubenswrapper[4900]: > Jan 27 13:56:18 crc kubenswrapper[4900]: I0127 13:56:18.359291 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:18 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:18 crc kubenswrapper[4900]: > Jan 27 13:56:19 crc kubenswrapper[4900]: I0127 13:56:19.785627 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:19 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:19 crc kubenswrapper[4900]: > Jan 27 13:56:27 crc kubenswrapper[4900]: I0127 13:56:27.541727 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:27 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:27 crc kubenswrapper[4900]: > Jan 27 13:56:29 crc kubenswrapper[4900]: I0127 13:56:29.791033 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:29 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:29 crc kubenswrapper[4900]: > Jan 27 13:56:37 crc kubenswrapper[4900]: I0127 13:56:37.611588 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:37 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:37 crc kubenswrapper[4900]: > Jan 27 13:56:40 crc kubenswrapper[4900]: I0127 13:56:40.581974 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< 
Jan 27 13:56:40 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:40 crc kubenswrapper[4900]: > Jan 27 13:56:47 crc kubenswrapper[4900]: I0127 13:56:47.529871 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:47 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:47 crc kubenswrapper[4900]: > Jan 27 13:56:49 crc kubenswrapper[4900]: I0127 13:56:49.841093 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:49 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:49 crc kubenswrapper[4900]: > Jan 27 13:56:49 crc kubenswrapper[4900]: I0127 13:56:49.986369 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:56:49 crc kubenswrapper[4900]: I0127 13:56:49.990873 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"245103c4351c6a70273942d22366fc9730bdf91d4e6320e709643ddecbfec69d"} pod="openshift-marketplace/community-operators-chtfl" containerMessage="Container registry-server failed startup probe, will be restarted" Jan 27 13:56:49 crc kubenswrapper[4900]: I0127 13:56:49.991638 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" containerID="cri-o://245103c4351c6a70273942d22366fc9730bdf91d4e6320e709643ddecbfec69d" gracePeriod=30 Jan 27 13:56:57 crc kubenswrapper[4900]: I0127 13:56:57.563304 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:56:57 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:56:57 crc kubenswrapper[4900]: > Jan 27 13:57:04 crc kubenswrapper[4900]: I0127 13:57:04.793718 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 13:57:05 crc kubenswrapper[4900]: I0127 13:57:05.195378 4900 generic.go:334] "Generic (PLEG): container finished" podID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerID="245103c4351c6a70273942d22366fc9730bdf91d4e6320e709643ddecbfec69d" exitCode=0 Jan 27 13:57:05 crc kubenswrapper[4900]: I0127 13:57:05.247488 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerDied","Data":"245103c4351c6a70273942d22366fc9730bdf91d4e6320e709643ddecbfec69d"} Jan 27 13:57:07 crc kubenswrapper[4900]: I0127 13:57:07.518888 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:57:07 crc kubenswrapper[4900]: timeout: failed to 
connect service ":50051" within 1s Jan 27 13:57:07 crc kubenswrapper[4900]: > Jan 27 13:57:11 crc kubenswrapper[4900]: I0127 13:57:11.280104 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerStarted","Data":"a8487f10c83c07590622fc9dd4f16f46872ef36a132cd0cdf5124d0695609b41"} Jan 27 13:57:17 crc kubenswrapper[4900]: I0127 13:57:17.516205 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:57:17 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:57:17 crc kubenswrapper[4900]: > Jan 27 13:57:17 crc kubenswrapper[4900]: I0127 13:57:17.518279 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:57:17 crc kubenswrapper[4900]: I0127 13:57:17.580179 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"eef20fdab8e817a6d49a6c032c199e2fb80a02b49b4b6b68d9d74c038fb6ecdf"} pod="openshift-marketplace/redhat-operators-bngd7" containerMessage="Container registry-server failed startup probe, will be restarted" Jan 27 13:57:17 crc kubenswrapper[4900]: I0127 13:57:17.580256 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" containerID="cri-o://eef20fdab8e817a6d49a6c032c199e2fb80a02b49b4b6b68d9d74c038fb6ecdf" gracePeriod=30 Jan 27 13:57:18 crc kubenswrapper[4900]: I0127 13:57:18.716614 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:57:18 crc kubenswrapper[4900]: I0127 13:57:18.717115 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:57:20 crc kubenswrapper[4900]: I0127 13:57:20.055955 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:57:20 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:57:20 crc kubenswrapper[4900]: > Jan 27 13:57:29 crc kubenswrapper[4900]: I0127 13:57:29.835861 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:57:29 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:57:29 crc kubenswrapper[4900]: > Jan 27 13:57:33 crc kubenswrapper[4900]: I0127 13:57:33.810814 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 13:57:33 crc kubenswrapper[4900]: I0127 13:57:33.816352 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 
13:57:37 crc kubenswrapper[4900]: I0127 13:57:37.348639 4900 trace.go:236] Trace[680980212]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-7xgpz" (27-Jan-2026 13:57:36.278) (total time: 1064ms): Jan 27 13:57:37 crc kubenswrapper[4900]: Trace[680980212]: [1.064114962s] [1.064114962s] END Jan 27 13:57:40 crc kubenswrapper[4900]: I0127 13:57:40.087102 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:57:40 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:57:40 crc kubenswrapper[4900]: > Jan 27 13:57:41 crc kubenswrapper[4900]: I0127 13:57:41.176225 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 13:57:43 crc kubenswrapper[4900]: I0127 13:57:43.795861 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 13:57:43 crc kubenswrapper[4900]: I0127 13:57:43.797029 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 13:57:48 crc kubenswrapper[4900]: I0127 13:57:48.139724 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/1.log" Jan 27 13:57:48 crc kubenswrapper[4900]: I0127 13:57:48.161224 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/0.log" Jan 27 13:57:48 crc kubenswrapper[4900]: I0127 13:57:48.172044 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerDied","Data":"eef20fdab8e817a6d49a6c032c199e2fb80a02b49b4b6b68d9d74c038fb6ecdf"} Jan 27 13:57:48 crc kubenswrapper[4900]: I0127 13:57:48.177629 4900 scope.go:117] "RemoveContainer" containerID="cdfd6bf02e7c5c95c061fc91e5180e7eec71cd2f5ed33a4277ce0bd6600b4f30" Jan 27 13:57:48 crc kubenswrapper[4900]: I0127 13:57:48.171946 4900 generic.go:334] "Generic (PLEG): container finished" podID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerID="eef20fdab8e817a6d49a6c032c199e2fb80a02b49b4b6b68d9d74c038fb6ecdf" exitCode=137 Jan 27 13:57:49 crc kubenswrapper[4900]: I0127 13:57:49.195871 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/1.log" Jan 27 13:57:49 crc kubenswrapper[4900]: I0127 13:57:49.789219 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:57:49 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:57:49 crc kubenswrapper[4900]: > Jan 27 
13:57:52 crc kubenswrapper[4900]: I0127 13:57:52.373255 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:57:52 crc kubenswrapper[4900]: I0127 13:57:52.374768 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:57:56 crc kubenswrapper[4900]: I0127 13:57:56.348038 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/1.log" Jan 27 13:57:56 crc kubenswrapper[4900]: I0127 13:57:56.352847 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerStarted","Data":"254bacb8cdc0181ab32eae1e52194721e7359505226c3523bf5d9e3315e99d48"} Jan 27 13:57:56 crc kubenswrapper[4900]: I0127 13:57:56.457102 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:57:56 crc kubenswrapper[4900]: I0127 13:57:56.457204 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 13:57:57 crc kubenswrapper[4900]: I0127 13:57:57.541563 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:57:57 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:57:57 crc kubenswrapper[4900]: > Jan 27 13:58:00 crc kubenswrapper[4900]: I0127 13:58:00.021344 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:00 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:00 crc kubenswrapper[4900]: > Jan 27 13:58:01 crc kubenswrapper[4900]: I0127 13:58:01.869669 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 13:58:01 crc kubenswrapper[4900]: I0127 13:58:01.869673 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 13:58:03 crc kubenswrapper[4900]: I0127 13:58:03.811696 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" 
probeResult="failure" output="command timed out" Jan 27 13:58:03 crc kubenswrapper[4900]: I0127 13:58:03.816647 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-wgnlg" podUID="e78c9b0a-3d34-4f6c-9c65-0ae63482fff7" containerName="nmstate-handler" probeResult="failure" output="command timed out" Jan 27 13:58:03 crc kubenswrapper[4900]: I0127 13:58:03.816814 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 13:58:04 crc kubenswrapper[4900]: I0127 13:58:04.794774 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 13:58:08 crc kubenswrapper[4900]: I0127 13:58:08.653722 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:08 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:08 crc kubenswrapper[4900]: > Jan 27 13:58:09 crc kubenswrapper[4900]: I0127 13:58:09.807690 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:09 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:09 crc kubenswrapper[4900]: > Jan 27 13:58:17 crc kubenswrapper[4900]: I0127 13:58:17.535552 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:17 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:17 crc kubenswrapper[4900]: > Jan 27 13:58:19 crc kubenswrapper[4900]: I0127 13:58:19.832727 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:19 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:19 crc kubenswrapper[4900]: > Jan 27 13:58:22 crc kubenswrapper[4900]: I0127 13:58:22.372461 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:58:22 crc kubenswrapper[4900]: I0127 13:58:22.456160 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:58:24 crc kubenswrapper[4900]: I0127 13:58:24.814958 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" 
probeResult="failure" output="command timed out" Jan 27 13:58:24 crc kubenswrapper[4900]: I0127 13:58:24.815144 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 13:58:24 crc kubenswrapper[4900]: I0127 13:58:24.840319 4900 patch_prober.go:28] interesting pod/metrics-server-7dbbbb77f-fjj4n container/metrics-server namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.82:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 13:58:24 crc kubenswrapper[4900]: I0127 13:58:24.840370 4900 patch_prober.go:28] interesting pod/metrics-server-7dbbbb77f-fjj4n container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.82:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 13:58:24 crc kubenswrapper[4900]: I0127 13:58:24.840405 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" podUID="61ef39b0-502c-45d5-be3a-e11c6ae19d59" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.82:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 13:58:24 crc kubenswrapper[4900]: I0127 13:58:24.840900 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" podUID="61ef39b0-502c-45d5-be3a-e11c6ae19d59" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.82:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 13:58:27 crc kubenswrapper[4900]: I0127 13:58:27.677880 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:27 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:27 crc kubenswrapper[4900]: > Jan 27 13:58:29 crc kubenswrapper[4900]: I0127 13:58:29.862498 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:29 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:29 crc kubenswrapper[4900]: > Jan 27 13:58:37 crc kubenswrapper[4900]: I0127 13:58:37.526891 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:37 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:37 crc kubenswrapper[4900]: > Jan 27 13:58:39 crc kubenswrapper[4900]: I0127 13:58:39.806244 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:39 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:39 crc kubenswrapper[4900]: > Jan 27 13:58:47 crc 
kubenswrapper[4900]: I0127 13:58:47.612254 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:47 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:47 crc kubenswrapper[4900]: > Jan 27 13:58:49 crc kubenswrapper[4900]: I0127 13:58:49.812600 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:49 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:49 crc kubenswrapper[4900]: > Jan 27 13:58:49 crc kubenswrapper[4900]: I0127 13:58:49.817949 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:58:49 crc kubenswrapper[4900]: I0127 13:58:49.821129 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"a8487f10c83c07590622fc9dd4f16f46872ef36a132cd0cdf5124d0695609b41"} pod="openshift-marketplace/community-operators-chtfl" containerMessage="Container registry-server failed startup probe, will be restarted" Jan 27 13:58:49 crc kubenswrapper[4900]: I0127 13:58:49.821530 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" containerID="cri-o://a8487f10c83c07590622fc9dd4f16f46872ef36a132cd0cdf5124d0695609b41" gracePeriod=30 Jan 27 13:58:52 crc kubenswrapper[4900]: I0127 13:58:52.373832 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 13:58:52 crc kubenswrapper[4900]: I0127 13:58:52.377154 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 13:58:52 crc kubenswrapper[4900]: I0127 13:58:52.377252 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 13:58:52 crc kubenswrapper[4900]: I0127 13:58:52.382615 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a204cecd5425db0d7771e4d87ef37003fa244ebdee6545fd60e5e45f7ecd8382"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 13:58:52 crc kubenswrapper[4900]: I0127 13:58:52.382754 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://a204cecd5425db0d7771e4d87ef37003fa244ebdee6545fd60e5e45f7ecd8382" 
gracePeriod=600 Jan 27 13:58:52 crc kubenswrapper[4900]: I0127 13:58:52.622705 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"a204cecd5425db0d7771e4d87ef37003fa244ebdee6545fd60e5e45f7ecd8382"} Jan 27 13:58:52 crc kubenswrapper[4900]: I0127 13:58:52.624660 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="a204cecd5425db0d7771e4d87ef37003fa244ebdee6545fd60e5e45f7ecd8382" exitCode=0 Jan 27 13:58:52 crc kubenswrapper[4900]: I0127 13:58:52.631676 4900 scope.go:117] "RemoveContainer" containerID="9d130e09aabc7572733e623787a46132f1b799a7dff79639de6ee983a3867dac" Jan 27 13:58:53 crc kubenswrapper[4900]: I0127 13:58:53.647907 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245"} Jan 27 13:58:57 crc kubenswrapper[4900]: I0127 13:58:57.516090 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:58:57 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:58:57 crc kubenswrapper[4900]: > Jan 27 13:59:02 crc kubenswrapper[4900]: I0127 13:59:02.809433 4900 generic.go:334] "Generic (PLEG): container finished" podID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerID="a8487f10c83c07590622fc9dd4f16f46872ef36a132cd0cdf5124d0695609b41" exitCode=0 Jan 27 13:59:02 crc kubenswrapper[4900]: I0127 13:59:02.809550 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerDied","Data":"a8487f10c83c07590622fc9dd4f16f46872ef36a132cd0cdf5124d0695609b41"} Jan 27 13:59:02 crc kubenswrapper[4900]: I0127 13:59:02.810786 4900 scope.go:117] "RemoveContainer" containerID="245103c4351c6a70273942d22366fc9730bdf91d4e6320e709643ddecbfec69d" Jan 27 13:59:02 crc kubenswrapper[4900]: I0127 13:59:02.883268 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 13:59:04 crc kubenswrapper[4900]: I0127 13:59:04.876193 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerStarted","Data":"edf34d1f731a0ed88ee92135cb225664c8ae3dfdfe5de507d718d31c06dfc817"} Jan 27 13:59:07 crc kubenswrapper[4900]: I0127 13:59:07.545943 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:59:07 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:59:07 crc kubenswrapper[4900]: > Jan 27 13:59:08 crc kubenswrapper[4900]: I0127 13:59:08.716478 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:59:08 crc kubenswrapper[4900]: I0127 13:59:08.716948 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-chtfl" Jan 27 13:59:09 crc kubenswrapper[4900]: I0127 13:59:09.786803 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:59:09 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:59:09 crc kubenswrapper[4900]: > Jan 27 13:59:17 crc kubenswrapper[4900]: I0127 13:59:17.534985 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 13:59:17 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:59:17 crc kubenswrapper[4900]: > Jan 27 13:59:19 crc kubenswrapper[4900]: I0127 13:59:19.796166 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:59:19 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:59:19 crc kubenswrapper[4900]: > Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.159212 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kbwqx"] Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.225427 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.286403 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-utilities\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.286888 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-catalog-content\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.287119 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4r44g\" (UniqueName: \"kubernetes.io/projected/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-kube-api-access-4r44g\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.391544 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4r44g\" (UniqueName: \"kubernetes.io/projected/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-kube-api-access-4r44g\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.391811 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-utilities\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.391959 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-catalog-content\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.454461 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-utilities\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.456612 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-catalog-content\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.763151 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kbwqx"] Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.806962 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4r44g\" (UniqueName: \"kubernetes.io/projected/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-kube-api-access-4r44g\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.807179 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4r44g\" (UniqueName: \"kubernetes.io/projected/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-kube-api-access-4r44g\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:20 crc kubenswrapper[4900]: I0127 13:59:20.807810 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4r44g\" (UniqueName: \"kubernetes.io/projected/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-kube-api-access-4r44g\") pod \"redhat-marketplace-kbwqx\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 13:59:21 crc kubenswrapper[4900]: I0127 13:59:21.051684 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kbwqx"
Jan 27 13:59:27 crc kubenswrapper[4900]: I0127 13:59:27.737594 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=<
Jan 27 13:59:27 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 13:59:27 crc kubenswrapper[4900]: >
Jan 27 13:59:27 crc kubenswrapper[4900]: I0127 13:59:27.775550 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7"
Jan 27 13:59:27 crc kubenswrapper[4900]: I0127 13:59:27.798747 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"254bacb8cdc0181ab32eae1e52194721e7359505226c3523bf5d9e3315e99d48"} pod="openshift-marketplace/redhat-operators-bngd7" containerMessage="Container registry-server failed startup probe, will be restarted"
Jan 27 13:59:27 crc kubenswrapper[4900]: I0127 13:59:27.800153 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" containerID="cri-o://254bacb8cdc0181ab32eae1e52194721e7359505226c3523bf5d9e3315e99d48" gracePeriod=30
Jan 27 13:59:29 crc kubenswrapper[4900]: I0127 13:59:29.926809 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=<
Jan 27 13:59:29 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 13:59:29 crc kubenswrapper[4900]: >
Jan 27 13:59:30 crc kubenswrapper[4900]: I0127 13:59:30.958305 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kbwqx"]
Jan 27 13:59:31 crc kubenswrapper[4900]: W0127 13:59:31.696764 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0df6989_f8c7_4cdd_a1f1_1cb78a9d046b.slice/crio-a17dfa61f23eaf620eefca77de11098deb7ce474be9fb6f8c572e38cd07e5a69 WatchSource:0}: Error finding container a17dfa61f23eaf620eefca77de11098deb7ce474be9fb6f8c572e38cd07e5a69: Status 404 returned error can't find the container with id a17dfa61f23eaf620eefca77de11098deb7ce474be9fb6f8c572e38cd07e5a69
Jan 27 13:59:32 crc kubenswrapper[4900]: I0127 13:59:32.393295 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerStarted","Data":"a17dfa61f23eaf620eefca77de11098deb7ce474be9fb6f8c572e38cd07e5a69"}
Jan 27 13:59:33 crc kubenswrapper[4900]: I0127 13:59:33.434359 4900 generic.go:334] "Generic (PLEG): container finished" podID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerID="980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2" exitCode=0
Jan 27 13:59:33 crc kubenswrapper[4900]: I0127 13:59:33.434826 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerDied","Data":"980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2"}
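
The entries at 13:59:27 show the kubelet's startup-probe restart path end to end: prober.go records the failure, kubelet.go flips the startup probe to "unhealthy", kuberuntime_manager.go notes that the container "failed startup probe, will be restarted", and kuberuntime_container.go kills it with the pod's 30s termination grace period. The quoted probe output is what a grpc_health_probe-style checker prints when it cannot reach the registry-server's gRPC port within its 1s budget. A minimal Go sketch of an equivalent reachability check, assuming a plain TCP dial in place of the real gRPC health RPC:

package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	// 1s budget, matching the "within 1s" in the probe output above.
	conn, err := net.DialTimeout("tcp", "localhost:50051", time.Second)
	if err != nil {
		fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
		os.Exit(1) // a non-zero exit is what the kubelet counts as one probe failure
	}
	conn.Close() // reachable: exit 0, the probe passes
}

The kubelet only looks at the exit status; once failures exceed the startup probe's failureThreshold (not recorded in this log), the restart above is triggered.
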
Jan 27 13:59:38 crc kubenswrapper[4900]: I0127 13:59:38.540565 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerStarted","Data":"f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b"}
Jan 27 13:59:40 crc kubenswrapper[4900]: I0127 13:59:40.361826 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=<
Jan 27 13:59:40 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 13:59:40 crc kubenswrapper[4900]: >
Jan 27 13:59:44 crc kubenswrapper[4900]: I0127 13:59:44.750342 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerDied","Data":"f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b"}
Jan 27 13:59:44 crc kubenswrapper[4900]: I0127 13:59:44.752105 4900 generic.go:334] "Generic (PLEG): container finished" podID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerID="f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b" exitCode=0
Jan 27 13:59:47 crc kubenswrapper[4900]: I0127 13:59:47.809372 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerStarted","Data":"bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973"}
Jan 27 13:59:47 crc kubenswrapper[4900]: I0127 13:59:47.940106 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kbwqx" podStartSLOduration=16.09373466 podStartE2EDuration="28.930982547s" podCreationTimestamp="2026-01-27 13:59:19 +0000 UTC" firstStartedPulling="2026-01-27 13:59:33.442173662 +0000 UTC m=+5600.679201872" lastFinishedPulling="2026-01-27 13:59:46.279421549 +0000 UTC m=+5613.516449759" observedRunningTime="2026-01-27 13:59:47.901251397 +0000 UTC m=+5615.138279637" watchObservedRunningTime="2026-01-27 13:59:47.930982547 +0000 UTC m=+5615.168010777"
Jan 27 13:59:49 crc kubenswrapper[4900]: I0127 13:59:49.967511 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=<
Jan 27 13:59:49 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 13:59:49 crc kubenswrapper[4900]: >
Jan 27 13:59:51 crc kubenswrapper[4900]: I0127 13:59:51.052257 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kbwqx"
Jan 27 13:59:51 crc kubenswrapper[4900]: I0127 13:59:51.052783 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kbwqx"
Jan 27 13:59:52 crc kubenswrapper[4900]: I0127 13:59:52.115010 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=<
Jan 27 13:59:52 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 13:59:52 crc kubenswrapper[4900]: >
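
The "Observed pod startup duration" entry at 13:59:47.940106 is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that E2E figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling), since pull time is excluded from the startup SLO. A short Go check with the timestamps copied from the entry:

package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		// Layout matching the log's "+0000 UTC" timestamp format.
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-27 13:59:19 +0000 UTC")
	firstPull := parse("2026-01-27 13:59:33.442173662 +0000 UTC")
	lastPull := parse("2026-01-27 13:59:46.279421549 +0000 UTC")
	running := parse("2026-01-27 13:59:47.930982547 +0000 UTC") // watchObservedRunningTime

	e2e := running.Sub(created)            // 28.930982547s -> podStartE2EDuration
	pulling := lastPull.Sub(firstPull)     // 12.837247887s spent pulling the image
	fmt.Println(e2e, pulling, e2e-pulling) // e2e-pulling = 16.09373466s -> podStartSLOduration
}

It prints 28.930982547s, 12.837247887s and 16.09373466s, matching the logged values.
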
path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/2.log" Jan 27 13:59:58 crc kubenswrapper[4900]: I0127 13:59:57.992085 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/1.log" Jan 27 13:59:58 crc kubenswrapper[4900]: I0127 13:59:58.000114 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerDied","Data":"254bacb8cdc0181ab32eae1e52194721e7359505226c3523bf5d9e3315e99d48"} Jan 27 13:59:58 crc kubenswrapper[4900]: I0127 13:59:58.005562 4900 generic.go:334] "Generic (PLEG): container finished" podID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerID="254bacb8cdc0181ab32eae1e52194721e7359505226c3523bf5d9e3315e99d48" exitCode=137 Jan 27 13:59:58 crc kubenswrapper[4900]: I0127 13:59:58.008291 4900 scope.go:117] "RemoveContainer" containerID="eef20fdab8e817a6d49a6c032c199e2fb80a02b49b4b6b68d9d74c038fb6ecdf" Jan 27 13:59:59 crc kubenswrapper[4900]: I0127 13:59:59.022621 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/2.log" Jan 27 13:59:59 crc kubenswrapper[4900]: I0127 13:59:59.859687 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 13:59:59 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 13:59:59 crc kubenswrapper[4900]: > Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.102865 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/2.log" Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.114343 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerStarted","Data":"9991df40d410601ffe901c22904434f4a34bf385283369ac4358e2182f170478"} Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.512801 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"] Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.616166 4900 util.go:30] "No sandbox for pod can be found. 
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.616166 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.682442 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.683221 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.757328 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/582d5425-f779-4617-843e-f68dcfa2e79d-config-volume\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.757578 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/582d5425-f779-4617-843e-f68dcfa2e79d-secret-volume\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.757671 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvzwl\" (UniqueName: \"kubernetes.io/projected/582d5425-f779-4617-843e-f68dcfa2e79d-kube-api-access-xvzwl\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.863301 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/582d5425-f779-4617-843e-f68dcfa2e79d-secret-volume\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.863418 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvzwl\" (UniqueName: \"kubernetes.io/projected/582d5425-f779-4617-843e-f68dcfa2e79d-kube-api-access-xvzwl\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.864098 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/582d5425-f779-4617-843e-f68dcfa2e79d-config-volume\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.902717 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/582d5425-f779-4617-843e-f68dcfa2e79d-config-volume\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
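
The reconciler entries above walk each of the collect-profiles pod's three volumes through the same pipeline: VerifyControllerAttachedVolume, then MountVolume, then a MountVolume.SetUp success. The plugin is encoded in the UniqueName prefix: kubernetes.io/configmap, kubernetes.io/secret, and kubernetes.io/projected for the kubelet-generated service-account token volume. A sketch of how the pod spec plausibly declares them, using the k8s.io/api types; the volume and ConfigMap names come from the log, while the Secret name and token path are placeholders:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	volumes := []corev1.Volume{
		{Name: "config-volume", VolumeSource: corev1.VolumeSource{
			ConfigMap: &corev1.ConfigMapVolumeSource{
				// ConfigMap named in the reflector entry above.
				LocalObjectReference: corev1.LocalObjectReference{Name: "collect-profiles-config"},
			},
		}},
		{Name: "secret-volume", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "profile-collector-cert"}, // hypothetical name; not in the log
		}},
		{Name: "kube-api-access-xvzwl", VolumeSource: corev1.VolumeSource{
			// The kubelet-generated service-account token volume.
			Projected: &corev1.ProjectedVolumeSource{Sources: []corev1.VolumeProjection{
				{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token"}},
			}},
		}},
	}
	for _, v := range volumes {
		fmt.Println(v.Name)
	}
}
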
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.991894 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"]
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.992525 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvzwl\" (UniqueName: \"kubernetes.io/projected/582d5425-f779-4617-843e-f68dcfa2e79d-kube-api-access-xvzwl\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:01 crc kubenswrapper[4900]: I0127 14:00:01.994983 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/582d5425-f779-4617-843e-f68dcfa2e79d-secret-volume\") pod \"collect-profiles-29492040-pkrtd\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:02 crc kubenswrapper[4900]: I0127 14:00:02.083843 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"
Jan 27 14:00:02 crc kubenswrapper[4900]: I0127 14:00:02.293631 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:02 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:02 crc kubenswrapper[4900]: >
Jan 27 14:00:06 crc kubenswrapper[4900]: I0127 14:00:06.459033 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bngd7"
Jan 27 14:00:06 crc kubenswrapper[4900]: I0127 14:00:06.460342 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7"
Jan 27 14:00:07 crc kubenswrapper[4900]: I0127 14:00:07.699476 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:07 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:07 crc kubenswrapper[4900]: >
Jan 27 14:00:09 crc kubenswrapper[4900]: I0127 14:00:09.821967 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:09 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:09 crc kubenswrapper[4900]: >
Jan 27 14:00:10 crc kubenswrapper[4900]: I0127 14:00:10.674891 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd"]
Jan 27 14:00:10 crc kubenswrapper[4900]: W0127 14:00:10.959168 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod582d5425_f779_4617_843e_f68dcfa2e79d.slice/crio-9a93a32eb07c1988473f973c6bcd5a1eb10fbd55ebbaf686989d90613d14046d WatchSource:0}: Error finding container 9a93a32eb07c1988473f973c6bcd5a1eb10fbd55ebbaf686989d90613d14046d: Status 404 returned error can't find the container with id 9a93a32eb07c1988473f973c6bcd5a1eb10fbd55ebbaf686989d90613d14046d
Jan 27 14:00:11 crc kubenswrapper[4900]: I0127 14:00:11.140265 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:11 crc kubenswrapper[4900]: I0127 14:00:11.343700 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd" event={"ID":"582d5425-f779-4617-843e-f68dcfa2e79d","Type":"ContainerStarted","Data":"9a93a32eb07c1988473f973c6bcd5a1eb10fbd55ebbaf686989d90613d14046d"}
Jan 27 14:00:12 crc kubenswrapper[4900]: I0127 14:00:12.490147 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:12 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:12 crc kubenswrapper[4900]: >
Jan 27 14:00:13 crc kubenswrapper[4900]: I0127 14:00:13.533732 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd" event={"ID":"582d5425-f779-4617-843e-f68dcfa2e79d","Type":"ContainerStarted","Data":"f5e99ad9ddb61e1bf5f50bafc044db0f2578ce5eae76a2f53ae5b3a7aa0da49d"}
Jan 27 14:00:15 crc kubenswrapper[4900]: I0127 14:00:15.826502 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:15 crc kubenswrapper[4900]: I0127 14:00:15.827385 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:15 crc kubenswrapper[4900]: I0127 14:00:15.826601 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:15 crc kubenswrapper[4900]: I0127 14:00:15.827479 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:18 crc kubenswrapper[4900]: I0127 14:00:17.635256 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:18 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:18 crc kubenswrapper[4900]: >
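
From 14:00:11 onward the log is dominated by HTTP liveness and readiness probe failures with two recurring Go net/http error shapes: "context deadline exceeded" and "net/http: request canceled ... (Client.Timeout exceeded while awaiting headers)". Operationally they mean the same thing: no response headers arrived within the probe's timeoutSeconds, which the kubelet enforces as a client timeout. A minimal sketch of such a check against the console-operator endpoint above, assuming the 1s default timeoutSeconds and kubelet-style certificate skipping:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: time.Second, // the probe's timeoutSeconds (assumed default here)
		Transport: &http.Transport{
			// Kubelet HTTPS probes do not verify the serving certificate.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://10.217.0.29:8443/readyz")
	if err != nil {
		// A hung endpoint yields the "Client.Timeout exceeded while awaiting
		// headers" wording seen throughout the entries above.
		fmt.Println("Probe failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // any 2xx counts as probe success
}
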
Jan 27 14:00:18 crc kubenswrapper[4900]: I0127 14:00:18.478890 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:18 crc kubenswrapper[4900]: I0127 14:00:18.478991 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:18 crc kubenswrapper[4900]: I0127 14:00:18.482848 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:18 crc kubenswrapper[4900]: I0127 14:00:18.482922 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:21 crc kubenswrapper[4900]: I0127 14:00:21.073322 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:21 crc kubenswrapper[4900]: I0127 14:00:21.240545 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:21 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:21 crc kubenswrapper[4900]: >
Jan 27 14:00:21 crc kubenswrapper[4900]: I0127 14:00:21.732618 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd" event={"ID":"582d5425-f779-4617-843e-f68dcfa2e79d","Type":"ContainerDied","Data":"f5e99ad9ddb61e1bf5f50bafc044db0f2578ce5eae76a2f53ae5b3a7aa0da49d"}
Jan 27 14:00:21 crc kubenswrapper[4900]: I0127 14:00:21.738008 4900 generic.go:334] "Generic (PLEG): container finished" podID="582d5425-f779-4617-843e-f68dcfa2e79d" containerID="f5e99ad9ddb61e1bf5f50bafc044db0f2578ce5eae76a2f53ae5b3a7aa0da49d" exitCode=0
Jan 27 14:00:22 crc kubenswrapper[4900]: I0127 14:00:22.275208 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:22 crc kubenswrapper[4900]: timeout: failed to connect service 
":50051" within 1s Jan 27 14:00:22 crc kubenswrapper[4900]: > Jan 27 14:00:26 crc kubenswrapper[4900]: I0127 14:00:26.995522 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:27 crc kubenswrapper[4900]: I0127 14:00:26.995540 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:27 crc kubenswrapper[4900]: I0127 14:00:26.997289 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:27 crc kubenswrapper[4900]: I0127 14:00:26.997429 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:27 crc kubenswrapper[4900]: I0127 14:00:27.863175 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:27 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:27 crc kubenswrapper[4900]: > Jan 27 14:00:28 crc kubenswrapper[4900]: I0127 14:00:28.895249 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd" event={"ID":"582d5425-f779-4617-843e-f68dcfa2e79d","Type":"ContainerDied","Data":"9a93a32eb07c1988473f973c6bcd5a1eb10fbd55ebbaf686989d90613d14046d"} Jan 27 14:00:28 crc kubenswrapper[4900]: I0127 14:00:28.896014 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a93a32eb07c1988473f973c6bcd5a1eb10fbd55ebbaf686989d90613d14046d" Jan 27 14:00:28 crc kubenswrapper[4900]: I0127 14:00:28.903539 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd" Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.124561 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/582d5425-f779-4617-843e-f68dcfa2e79d-config-volume\") pod \"582d5425-f779-4617-843e-f68dcfa2e79d\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.124711 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvzwl\" (UniqueName: \"kubernetes.io/projected/582d5425-f779-4617-843e-f68dcfa2e79d-kube-api-access-xvzwl\") pod \"582d5425-f779-4617-843e-f68dcfa2e79d\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.125195 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/582d5425-f779-4617-843e-f68dcfa2e79d-secret-volume\") pod \"582d5425-f779-4617-843e-f68dcfa2e79d\" (UID: \"582d5425-f779-4617-843e-f68dcfa2e79d\") " Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.200289 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/582d5425-f779-4617-843e-f68dcfa2e79d-config-volume" (OuterVolumeSpecName: "config-volume") pod "582d5425-f779-4617-843e-f68dcfa2e79d" (UID: "582d5425-f779-4617-843e-f68dcfa2e79d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.230279 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/582d5425-f779-4617-843e-f68dcfa2e79d-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.463893 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/582d5425-f779-4617-843e-f68dcfa2e79d-kube-api-access-xvzwl" (OuterVolumeSpecName: "kube-api-access-xvzwl") pod "582d5425-f779-4617-843e-f68dcfa2e79d" (UID: "582d5425-f779-4617-843e-f68dcfa2e79d"). InnerVolumeSpecName "kube-api-access-xvzwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.467670 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/582d5425-f779-4617-843e-f68dcfa2e79d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "582d5425-f779-4617-843e-f68dcfa2e79d" (UID: "582d5425-f779-4617-843e-f68dcfa2e79d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.541411 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvzwl\" (UniqueName: \"kubernetes.io/projected/582d5425-f779-4617-843e-f68dcfa2e79d-kube-api-access-xvzwl\") on node \"crc\" DevicePath \"\"" Jan 27 14:00:29 crc kubenswrapper[4900]: I0127 14:00:29.541829 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/582d5425-f779-4617-843e-f68dcfa2e79d-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 14:00:30 crc kubenswrapper[4900]: I0127 14:00:29.949934 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492040-pkrtd" Jan 27 14:00:30 crc kubenswrapper[4900]: I0127 14:00:30.013768 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:30 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:30 crc kubenswrapper[4900]: > Jan 27 14:00:30 crc kubenswrapper[4900]: I0127 14:00:30.075361 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:30 crc kubenswrapper[4900]: I0127 14:00:30.075412 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:30 crc kubenswrapper[4900]: I0127 14:00:30.075453 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:30 crc kubenswrapper[4900]: I0127 14:00:30.075500 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.156314 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.239502 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.239573 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" podUID="8031fe9b-6753-4ab7-abac-fece10fd066b" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.239802 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.239853 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-gnhhx" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.240267 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" podUID="8031fe9b-6753-4ab7-abac-fece10fd066b" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.281721 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" podUID="b094071d-c368-40e6-8515-a17d0a22a868" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.277114 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="frr" containerStatusID={"Type":"cri-o","ID":"1fe2024e0a8622ac34ff6a14a0930a30efa9fc32f2b5e8f61fc308b8b1f6771d"} pod="metallb-system/frr-k8s-gnhhx" containerMessage="Container frr failed liveness probe, will be restarted" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.329385 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" containerID="cri-o://1fe2024e0a8622ac34ff6a14a0930a30efa9fc32f2b5e8f61fc308b8b1f6771d" gracePeriod=2 Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.381434 4900 patch_prober.go:28] interesting pod/thanos-querier-6dfcd64f45-mmzj6 container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.80:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.381539 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" podUID="a3f86dbd-dce0-4546-8668-e235cc7b5b2d" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.80:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.485501 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" podUID="e3cba13f-5396-4c71-8f81-d2d932baca1f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.633602 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" podUID="5155088c-b873-4fac-b1e9-87f57c2fae68" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.633634 4900 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.765407 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" podUID="175105c2-dfc2-4752-bf75-a027d86dc373" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.765461 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podUID="e8365f5d-b2f2-4cab-a803-e722c65ae307" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.809377 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podUID="a6dce274-9090-44fc-ac6b-6e164e5b7192" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.809426 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.940143 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" podUID="5104a740-a23d-4ea4-a186-97768d490075" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:31 crc kubenswrapper[4900]: I0127 14:00:31.940537 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podUID="4967ec79-a9dd-438a-9cb7-b89b3af09ff5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:32 crc kubenswrapper[4900]: I0127 14:00:32.120329 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" podUID="65b8356b-f64f-4cb8-94af-6b8d45448a63" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:32 crc kubenswrapper[4900]: I0127 14:00:32.264325 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" podUID="70b6c48f-4c95-468f-a792-abe4e318948f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context 
deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:32 crc kubenswrapper[4900]: I0127 14:00:32.345636 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" podUID="5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:32 crc kubenswrapper[4900]: I0127 14:00:32.386348 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podUID="1759ba9c-7c4a-4380-81f5-e67d8e418fa1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:32 crc kubenswrapper[4900]: I0127 14:00:32.575814 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:32 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:32 crc kubenswrapper[4900]: > Jan 27 14:00:33 crc kubenswrapper[4900]: I0127 14:00:33.183513 4900 generic.go:334] "Generic (PLEG): container finished" podID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerID="1fe2024e0a8622ac34ff6a14a0930a30efa9fc32f2b5e8f61fc308b8b1f6771d" exitCode=143 Jan 27 14:00:33 crc kubenswrapper[4900]: I0127 14:00:33.183602 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerDied","Data":"1fe2024e0a8622ac34ff6a14a0930a30efa9fc32f2b5e8f61fc308b8b1f6771d"} Jan 27 14:00:33 crc kubenswrapper[4900]: I0127 14:00:33.579306 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" podUID="f3cc1727-3d00-43f4-92c3-5ef428297727" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:33 crc kubenswrapper[4900]: I0127 14:00:33.794559 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:33 crc kubenswrapper[4900]: I0127 14:00:33.794652 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.031418 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.031459 4900 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-82kkf container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.87:9443/readyz\": net/http: request 
canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.031696 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" podUID="b1058137-9f30-4107-a5a2-1a2edf16cbce" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.87:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.031422 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.272627 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.272756 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.288300 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.288414 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.56:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.389301 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podUID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.389304 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podUID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.754629 4900 patch_prober.go:28] interesting pod/metrics-server-7dbbbb77f-fjj4n container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get 
\"https://10.217.0.82:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.755134 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" podUID="61ef39b0-502c-45d5-be3a-e11c6ae19d59" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.82:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.799014 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.802981 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.852286 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:34 crc kubenswrapper[4900]: I0127 14:00:34.852294 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.143586 4900 patch_prober.go:28] interesting pod/monitoring-plugin-66c88bc574-zpdfk container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.83:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.143694 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" podUID="654e1706-9908-41f6-ba56-90e58ff3f665" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.83:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.159280 4900 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-s7kkg container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.159412 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" podUID="8a55139c-7a3d-4800-b2d1-8cc5270d6eaa" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": net/http: request canceled while waiting for 
connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.449374 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.449411 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.449470 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.449610 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.544466 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:35 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:35 crc kubenswrapper[4900]: > Jan 27 14:00:35 crc kubenswrapper[4900]: I0127 14:00:35.548129 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:35 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:35 crc kubenswrapper[4900]: > Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.005774 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.005881 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.005985 4900 patch_prober.go:28] interesting 
pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.006007 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.006296 4900 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-p424v container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.006320 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-p424v" podUID="6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.006944 4900 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-p424v container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.006970 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-59bdc8b94-p424v" podUID="6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.007022 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.007044 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.007105 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.007125 4900 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.357483 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f"] Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.371418 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29491995-d8c7f"] Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.390507 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.166:9090/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.393317 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.166:9090/-/healthy\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.598465 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f8d606c-71a1-417f-b6f5-0e224785c9e5" path="/var/lib/kubelet/pods/7f8d606c-71a1-417f-b6f5-0e224785c9e5/volumes" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.880952 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.881084 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.881215 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:36 crc kubenswrapper[4900]: I0127 14:00:36.881315 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:36.999746 4900 
patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.000253 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:36.999951 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.001337 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.042227 4900 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.042342 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.450647 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.450787 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.450617 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager namespace/openshift-controller-manager: Liveness 
probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.450921 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.621755 4900 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.621863 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.942310 4900 patch_prober.go:28] interesting pod/console-7f9b6cf6cc-5nhbt container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.942800 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-7f9b6cf6cc-5nhbt" podUID="57b93bd3-2b4f-45f5-9691-4b3f553c1c13" containerName="console" probeResult="failure" output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:37 crc kubenswrapper[4900]: I0127 14:00:37.954009 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:37 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:37 crc kubenswrapper[4900]: > Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.188740 4900 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-k62tn container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.188849 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" podUID="bf3e0b5e-77aa-4f51-9cca-149e20525f8f" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.354866 4900 
patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rg7hv container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.354988 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" podUID="f1cfe76c-2aba-4da6-a7a7-fa01e883cb60" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.376969 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"599b501bfcbc5d9fd67f5c42625ff24d7077b9363dc949c5f5fe0711f58d0080"} Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.442462 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.442573 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.443941 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.444050 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.489197 4900 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-mlj7c container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.489298 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" podUID="62fdb605-a4e3-443d-9887-1ebc8218908f" containerName="loki-query-frontend" probeResult="failure" output="Get 
\"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.958500 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx" podUID="c35e074b-0e8e-4d1f-8d2c-5c23cf320f25" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:38 crc kubenswrapper[4900]: I0127 14:00:38.958582 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx" podUID="c35e074b-0e8e-4d1f-8d2c-5c23cf320f25" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.273867 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.273962 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.288039 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.288170 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.56:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.501708 4900 trace.go:236] Trace[774220678]: "Calculate volume metrics of glance for pod openstack/glance-default-internal-api-0" (27-Jan-2026 14:00:35.719) (total time: 3776ms): Jan 27 14:00:39 crc kubenswrapper[4900]: Trace[774220678]: [3.776820075s] [3.776820075s] END Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.501727 4900 trace.go:236] Trace[113757790]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-ingester-0" (27-Jan-2026 14:00:36.783) (total time: 2712ms): Jan 27 14:00:39 crc kubenswrapper[4900]: Trace[113757790]: [2.712998232s] [2.712998232s] END Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.501729 4900 trace.go:236] Trace[418891326]: "Calculate volume metrics of mysql-db for pod openstack/openstack-cell1-galera-0" (27-Jan-2026 14:00:36.890) (total time: 2608ms): Jan 27 14:00:39 crc kubenswrapper[4900]: Trace[418891326]: [2.608654165s] [2.608654165s] END Jan 27 
14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.785206 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:39 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:39 crc kubenswrapper[4900]: > Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.797440 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.798138 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Jan 27 14:00:39 crc kubenswrapper[4900]: I0127 14:00:39.910897 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-chtfl" Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:39.993474 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podUID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.93:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:39.993578 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podUID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.93:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:40.054327 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:40.054410 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:40.054516 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:40.054638 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while 
awaiting headers)" Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:40.055973 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-gnhhx" Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:40.053968 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"edf34d1f731a0ed88ee92135cb225664c8ae3dfdfe5de507d718d31c06dfc817"} pod="openshift-marketplace/community-operators-chtfl" containerMessage="Container registry-server failed startup probe, will be restarted" Jan 27 14:00:40 crc kubenswrapper[4900]: I0127 14:00:40.081689 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" containerID="cri-o://edf34d1f731a0ed88ee92135cb225664c8ae3dfdfe5de507d718d31c06dfc817" gracePeriod=30 Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.112480 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.196383 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" podUID="8031fe9b-6753-4ab7-abac-fece10fd066b" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.237340 4900 prober.go:107] "Probe failed" probeType="Startup" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.237427 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.237763 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" podUID="8031fe9b-6753-4ab7-abac-fece10fd066b" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.319410 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" podUID="1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.402452 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" 
podUID="b094071d-c368-40e6-8515-a17d0a22a868" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.403510 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" podUID="1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.404000 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" podUID="b094071d-c368-40e6-8515-a17d0a22a868" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.404015 4900 patch_prober.go:28] interesting pod/thanos-querier-6dfcd64f45-mmzj6 container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.80:9091/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.404270 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" podUID="a3f86dbd-dce0-4546-8668-e235cc7b5b2d" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.80:9091/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.529362 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" podUID="e3cba13f-5396-4c71-8f81-d2d932baca1f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.529930 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" podUID="e3cba13f-5396-4c71-8f81-d2d932baca1f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.644232 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" podUID="5155088c-b873-4fac-b1e9-87f57c2fae68" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.727470 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" podUID="5155088c-b873-4fac-b1e9-87f57c2fae68" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 
27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.727545 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:41 crc kubenswrapper[4900]: I0127 14:00:41.809493 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.057372 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" podUID="175105c2-dfc2-4752-bf75-a027d86dc373" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.057425 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podUID="a6dce274-9090-44fc-ac6b-6e164e5b7192" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.057428 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" podUID="175105c2-dfc2-4752-bf75-a027d86dc373" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.057443 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podUID="e8365f5d-b2f2-4cab-a803-e722c65ae307" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.139506 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podUID="e8365f5d-b2f2-4cab-a803-e722c65ae307" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.139545 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.188489 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" 
podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.189387 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podUID="4967ec79-a9dd-438a-9cb7-b89b3af09ff5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.189480 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podUID="a6dce274-9090-44fc-ac6b-6e164e5b7192" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.357366 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" podUID="5104a740-a23d-4ea4-a186-97768d490075" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.357404 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" podUID="65b8356b-f64f-4cb8-94af-6b8d45448a63" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.357586 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" podUID="5104a740-a23d-4ea4-a186-97768d490075" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.442359 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" podUID="0c0782a0-6d83-4760-82dd-cea358647713" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.524402 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" podUID="a988e8ab-311d-4b6a-a75e-c49601a77d46" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.524458 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" podUID="70b6c48f-4c95-468f-a792-abe4e318948f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 
crc kubenswrapper[4900]: I0127 14:00:42.524644 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" podUID="a988e8ab-311d-4b6a-a75e-c49601a77d46" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.606801 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" podUID="5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.606803 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podUID="4967ec79-a9dd-438a-9cb7-b89b3af09ff5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.694296 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podUID="1759ba9c-7c4a-4380-81f5-e67d8e418fa1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.694531 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" podUID="0c0782a0-6d83-4760-82dd-cea358647713" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.694249 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" podUID="65b8356b-f64f-4cb8-94af-6b8d45448a63" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.778347 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" podUID="70b6c48f-4c95-468f-a792-abe4e318948f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.778465 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" podUID="5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.778606 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podUID="1759ba9c-7c4a-4380-81f5-e67d8e418fa1" 
containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.778765 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" podUID="5d4cc48d-12ab-458e-bf29-bc87a182f5c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:42 crc kubenswrapper[4900]: I0127 14:00:42.778871 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" podUID="5d4cc48d-12ab-458e-bf29-bc87a182f5c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:43 crc kubenswrapper[4900]: I0127 14:00:43.264431 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" podUID="76f1d09b-01aa-4c81-b568-8ffb58182475" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:43 crc kubenswrapper[4900]: I0127 14:00:43.561445 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:43 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:43 crc kubenswrapper[4900]: > Jan 27 14:00:43 crc kubenswrapper[4900]: I0127 14:00:43.624547 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" podUID="f3cc1727-3d00-43f4-92c3-5ef428297727" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:43 crc kubenswrapper[4900]: I0127 14:00:43.624586 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" podUID="f3cc1727-3d00-43f4-92c3-5ef428297727" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.100:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:43 crc kubenswrapper[4900]: I0127 14:00:43.793978 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:43 crc kubenswrapper[4900]: I0127 14:00:43.794802 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:43 crc kubenswrapper[4900]: I0127 14:00:43.991606 4900 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-82kkf container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.87:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout 
exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:43.992212 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" podUID="b1058137-9f30-4107-a5a2-1a2edf16cbce" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.87:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:43.996241 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.825088 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.825299 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": context deadline exceeded" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.825363 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": context deadline exceeded" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.825446 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8081/ready\": context deadline exceeded" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.825466 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.56:8081/ready\": context deadline exceeded" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.826873 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8083/ready\": context deadline exceeded" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.826959 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.56:8083/ready\": context deadline exceeded" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.827645 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": context deadline exceeded" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.827738 4900 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": context deadline exceeded" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.884311 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podUID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.886003 4900 patch_prober.go:28] interesting pod/metrics-server-7dbbbb77f-fjj4n container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.82:10250/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.886045 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" podUID="61ef39b0-502c-45d5-be3a-e11c6ae19d59" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.82:10250/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.928423 4900 patch_prober.go:28] interesting pod/metrics-server-7dbbbb77f-fjj4n container/metrics-server namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.82:10250/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.928560 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" podUID="61ef39b0-502c-45d5-be3a-e11c6ae19d59" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.82:10250/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.928666 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:44.933239 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.225235 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.796716 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 
14:00:45.826393 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.826457 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.826584 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.826499 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.826711 4900 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-s7kkg container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.826739 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-s7kkg" podUID="8a55139c-7a3d-4800-b2d1-8cc5270d6eaa" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.827929 4900 patch_prober.go:28] interesting pod/monitoring-plugin-66c88bc574-zpdfk container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.83:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.828079 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" podUID="654e1706-9908-41f6-ba56-90e58ff3f665" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.83:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.870453 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr 
container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.870536 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.870550 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.870629 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.870685 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:45 crc kubenswrapper[4900]: I0127 14:00:45.870709 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.077275 4900 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-p424v container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.077379 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-59bdc8b94-p424v" podUID="6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.077956 4900 patch_prober.go:28] interesting pod/downloads-7954f5f757-zbp4l container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078050 4900 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console/downloads-7954f5f757-zbp4l" podUID="75d8fa11-eb06-4aae-8e96-3bb4328d69d7" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078356 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078397 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078431 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078435 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078479 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078501 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078556 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.078587 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 
14:00:46.120453 4900 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-p424v container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.120638 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-p424v" podUID="6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.120731 4900 prober.go:107] "Probe failed" probeType="Startup" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.203634 4900 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-cbtnw container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.22:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.203772 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" podUID="b925e9c1-ac78-41d5-a783-88a95ae66df6" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.22:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.203914 4900 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-cbtnw container/perses-operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.22:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.203937 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" podUID="b925e9c1-ac78-41d5-a783-88a95ae66df6" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.22:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.382538 4900 patch_prober.go:28] interesting pod/thanos-querier-6dfcd64f45-mmzj6 container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.80:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.382648 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" podUID="a3f86dbd-dce0-4546-8668-e235cc7b5b2d" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.80:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.390763 4900 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/prometheus-metric-storage-0" podUID="ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.166:9090/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.390788 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.166:9090/-/healthy\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.881114 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.881632 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.881166 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:46 crc kubenswrapper[4900]: I0127 14:00:46.881730 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.011562 4900 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-hffvz container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.011762 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" podUID="db5a985c-5b4c-4ab5-ab7c-61b356b88494" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.011814 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": net/http: request 
canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.011915 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.011938 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.012011 4900 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-hffvz container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.012108 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" podUID="db5a985c-5b4c-4ab5-ab7c-61b356b88494" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.011964 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.021771 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.023079 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.026419 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="packageserver" containerStatusID={"Type":"cri-o","ID":"aea49d80580735b7c3077d2ecd2a58286854904b62c4438bcc939e0966a00158"} pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" containerMessage="Container packageserver failed liveness probe, will be restarted" Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.028290 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" containerID="cri-o://aea49d80580735b7c3077d2ecd2a58286854904b62c4438bcc939e0966a00158" gracePeriod=30 Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.040427 4900 patch_prober.go:28] 
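The two recurring suffixes in the probe outputs above, "context deadline exceeded (Client.Timeout exceeded while awaiting headers)" and "net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)", are standard Go net/http client errors: the kubelet's probe HTTP client runs with a per-request timeout, and the parenthesized annotation is appended when that Client.Timeout fires before the server returns response headers (the "while waiting for connection" variant appears when the timeout fires during connection setup instead). A minimal, self-contained sketch that reproduces the first message; the slow handler and the 100ms timeout are illustrative assumptions, not values taken from this cluster:

// Demonstrates the error text logged by the probes above: an http.Client
// with Timeout set reports "context deadline exceeded (Client.Timeout
// exceeded while awaiting headers)" when the server accepts the connection
// but sends no response headers before the timeout elapses.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	// Hypothetical slow endpoint standing in for a pod's /healthz.
	slow := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(2 * time.Second) // headers never arrive within the client timeout
	}))
	defer slow.Close()

	client := &http.Client{Timeout: 100 * time.Millisecond}
	_, err := client.Get(slow.URL + "/healthz")
	fmt.Println(err)
	// e.g. Get "http://127.0.0.1:PORT/healthz": context deadline exceeded
	// (Client.Timeout exceeded while awaiting headers)
}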
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.040427 4900 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.040551 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.443397 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.443504 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.456436 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.456564 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.621944 4900 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded" start-of-body=
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.622076 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded"
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.796666 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" probeResult="failure" output="command timed out"
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.798311 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" probeResult="failure" output="command timed out"
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.942572 4900 patch_prober.go:28] interesting pod/console-7f9b6cf6cc-5nhbt container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:47 crc kubenswrapper[4900]: I0127 14:00:47.942679 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-7f9b6cf6cc-5nhbt" podUID="57b93bd3-2b4f-45f5-9691-4b3f553c1c13" containerName="console" probeResult="failure" output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.023762 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.023902 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.189410 4900 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-k62tn container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.189572 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" podUID="bf3e0b5e-77aa-4f51-9cca-149e20525f8f" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.356244 4900 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rg7hv container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.356520 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" podUID="f1cfe76c-2aba-4da6-a7a7-fa01e883cb60" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.477410 4900 patch_prober.go:28] interesting pod/loki-operator-controller-manager-849c99c676-jbpgt container/manager namespace/openshift-operators-redhat: Liveness probe status=failure output="Get \"http://10.217.0.47:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.477514 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" podUID="386bc10f-9e5d-49d0-9906-e97f1796d49d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.47:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.477616 4900 patch_prober.go:28] interesting pod/loki-operator-controller-manager-849c99c676-jbpgt container/manager namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.47:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.477632 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" podUID="386bc10f-9e5d-49d0-9906-e97f1796d49d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.47:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.477693 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.477714 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.477799 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.477918 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.489921 4900 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-mlj7c container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.490024 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" podUID="62fdb605-a4e3-443d-9887-1ebc8218908f" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.674323 4900 scope.go:117] "RemoveContainer" containerID="3d46533d3f6c249ca600d3f4d986771628602ad7d225cce6313470e290e83923"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.794982 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-wgnlg" podUID="e78c9b0a-3d34-4f6c-9c65-0ae63482fff7" containerName="nmstate-handler" probeResult="failure" output="command timed out"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.796948 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="f3eb0985-f56f-4111-86c6-d511433058c0" containerName="prometheus" probeResult="failure" output="command timed out"
Jan 27 14:00:48 crc kubenswrapper[4900]: I0127 14:00:48.797984 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="f3eb0985-f56f-4111-86c6-d511433058c0" containerName="prometheus" probeResult="failure" output="command timed out"
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.275026 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.275167 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.288742 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.288869 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.56:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.484617 4900 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.58:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.485235 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-compactor-0" podUID="904dcfed-5ddb-4cb9-bac8-8feb64b3bab4" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.58:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.649686 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" podUID="88d8052f-1988-4229-abc5-100335ed01e2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.649967 4900 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.650085 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="e780efe8-7578-4940-b01c-c199f36d6554" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.57:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.661862 4900 patch_prober.go:28] interesting pod/logging-loki-index-gateway-0 container/loki-index-gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.60:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.661983 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-index-gateway-0" podUID="7e0b7978-27f2-42e9-8116-59384da3719b" containerName="loki-index-gateway" probeResult="failure" output="Get \"https://10.217.0.60:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:49 crc kubenswrapper[4900]: I0127 14:00:49.998216 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podUID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.93:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:49.998794 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podUID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.93:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:49.998864 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:49.998899 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:49.998951 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:49.999181 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:49.999294 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.005845 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.009374 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="oauth-openshift" containerStatusID={"Type":"cri-o","ID":"99b7678c82d77089ad530dce58e1c26fc00601dda0d1d842420d2dfbee4caa76"} pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" containerMessage="Container oauth-openshift failed liveness probe, will be restarted"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.380701 4900 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-9vrn8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.69:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.380809 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" podUID="54ddde5c-b5ea-47c1-8ef5-f697d7319c6b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.69:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.381321 4900 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-9vrn8 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.69:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.381482 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-9vrn8" podUID="54ddde5c-b5ea-47c1-8ef5-f697d7319c6b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.69:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.801767 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.802113 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.804872 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-central-agent" containerStatusID={"Type":"cri-o","ID":"7b6ea029f6d464c8d975de774809f4a8ad2d400f9a65de460b9211083ded43f3"} pod="openstack/ceilometer-0" containerMessage="Container ceilometer-central-agent failed liveness probe, will be restarted"
Jan 27 14:00:50 crc kubenswrapper[4900]: I0127 14:00:50.805095 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" containerName="ceilometer-central-agent" containerID="cri-o://7b6ea029f6d464c8d975de774809f4a8ad2d400f9a65de460b9211083ded43f3" gracePeriod=30
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.006485 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.006584 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.021334 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" event={"ID":"665d09a8-9764-4b5d-975e-5c96fd671dd5","Type":"ContainerDied","Data":"aea49d80580735b7c3077d2ecd2a58286854904b62c4438bcc939e0966a00158"}
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.026799 4900 generic.go:334] "Generic (PLEG): container finished" podID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerID="aea49d80580735b7c3077d2ecd2a58286854904b62c4438bcc939e0966a00158" exitCode=0
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.156602 4900 prober.go:107] "Probe failed" probeType="Startup" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.198090 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" podUID="8031fe9b-6753-4ab7-abac-fece10fd066b" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.199033 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.202198 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="frr-k8s-webhook-server" containerStatusID={"Type":"cri-o","ID":"3ad00e407836ccd2af15463c034144967f0a9d4ad31f058f57d1fbd26f5b6db0"} pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" containerMessage="Container frr-k8s-webhook-server failed liveness probe, will be restarted"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.202390 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" podUID="8031fe9b-6753-4ab7-abac-fece10fd066b" containerName="frr-k8s-webhook-server" containerID="cri-o://3ad00e407836ccd2af15463c034144967f0a9d4ad31f058f57d1fbd26f5b6db0" gracePeriod=10
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.239392 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.239574 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-gnhhx"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.240489 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" podUID="8031fe9b-6753-4ab7-abac-fece10fd066b" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.240664 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.240775 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.241357 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-gnhhx"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.255513 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller" containerStatusID={"Type":"cri-o","ID":"58cc2bb3c4ced73464022004c5bc29b75033657c8b232b10549b81b5ebe2e2fb"} pod="metallb-system/frr-k8s-gnhhx" containerMessage="Container controller failed liveness probe, will be restarted"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.255778 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="controller" containerID="cri-o://58cc2bb3c4ced73464022004c5bc29b75033657c8b232b10549b81b5ebe2e2fb" gracePeriod=2
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.281863 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" podUID="1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.323482 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" podUID="b094071d-c368-40e6-8515-a17d0a22a868" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.323915 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.364594 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.2:8080/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.366140 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.1.2:8081/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.383457 4900 patch_prober.go:28] interesting pod/thanos-querier-6dfcd64f45-mmzj6 container/kube-rbac-proxy-web namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.80:9091/-/healthy\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.383563 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" podUID="a3f86dbd-dce0-4546-8668-e235cc7b5b2d" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.80:9091/-/healthy\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.384227 4900 patch_prober.go:28] interesting pod/thanos-querier-6dfcd64f45-mmzj6 container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.80:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.384436 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-6dfcd64f45-mmzj6" podUID="a3f86dbd-dce0-4546-8668-e235cc7b5b2d" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.80:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.487159 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" podUID="e3cba13f-5396-4c71-8f81-d2d932baca1f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.487336 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.631605 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.631619 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" podUID="5155088c-b873-4fac-b1e9-87f57c2fae68" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.631823 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.632076 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.722512 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" podUID="175105c2-dfc2-4752-bf75-a027d86dc373" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.722694 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.765449 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podUID="e8365f5d-b2f2-4cab-a803-e722c65ae307" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.766020 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.799866 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-operators-vc5r8" podUID="c42ffb69-dba9-4ce2-8fe6-a5581776859f" containerName="registry-server" probeResult="failure" output="command timed out"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.799874 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-operators-vc5r8" podUID="c42ffb69-dba9-4ce2-8fe6-a5581776859f" containerName="registry-server" probeResult="failure" output="command timed out"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.807455 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.807617 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.848394 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podUID="a6dce274-9090-44fc-ac6b-6e164e5b7192" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.848844 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r"
Jan 27 14:00:51 crc kubenswrapper[4900]: I0127 14:00:51.931452 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/controller-6968d8fdc4-7dbg4" podUID="b047f3e7-1d76-487b-96a3-ff81b159ae95" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.014538 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" podUID="5104a740-a23d-4ea4-a186-97768d490075" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.014760 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.014482 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/controller-6968d8fdc4-7dbg4" podUID="b047f3e7-1d76-487b-96a3-ff81b159ae95" containerName="controller" probeResult="failure" output="Get \"http://10.217.0.96:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.015389 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podUID="4967ec79-a9dd-438a-9cb7-b89b3af09ff5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.015639 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.078334 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" podUID="a988e8ab-311d-4b6a-a75e-c49601a77d46" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.121499 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" podUID="65b8356b-f64f-4cb8-94af-6b8d45448a63" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.121704 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.225435 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" podUID="0c0782a0-6d83-4760-82dd-cea358647713" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.267385 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" podUID="70b6c48f-4c95-468f-a792-abe4e318948f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.267607 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.432433 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" podUID="5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.432501 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" podUID="b094071d-c368-40e6-8515-a17d0a22a868" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.432447 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podUID="1759ba9c-7c4a-4380-81f5-e67d8e418fa1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.432640 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.432696 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.473595 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" podUID="5d4cc48d-12ab-458e-bf29-bc87a182f5c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.514524 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" podUID="be0258a0-aba9-4900-b507-4767b2726a69" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.557447 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" podUID="e3cba13f-5396-4c71-8f81-d2d932baca1f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.716533 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.716625 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" podUID="5155088c-b873-4fac-b1e9-87f57c2fae68" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.765570 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" podUID="175105c2-dfc2-4752-bf75-a027d86dc373" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.765954 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.803758 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output="command timed out"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.807472 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podUID="e8365f5d-b2f2-4cab-a803-e722c65ae307" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.892399 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" podUID="4715fe70-acab-4dea-adde-68e1a6e8cb28" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.892519 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podUID="a6dce274-9090-44fc-ac6b-6e164e5b7192" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.947209 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-dkmsz" podUID="d6060c2f-7323-4d7a-9278-500fae84459b" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:52 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:52 crc kubenswrapper[4900]: >
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.947412 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-k2gzb" podUID="50203e3a-7094-487f-9d3b-a9467363dfaf" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:52 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:52 crc kubenswrapper[4900]: >
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.947636 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:52 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:52 crc kubenswrapper[4900]: >
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.947709 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-dkmsz" podUID="d6060c2f-7323-4d7a-9278-500fae84459b" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:52 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:52 crc kubenswrapper[4900]: >
Jan 27 14:00:52 crc kubenswrapper[4900]: I0127 14:00:52.948006 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-k2gzb" podUID="50203e3a-7094-487f-9d3b-a9467363dfaf" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:00:52 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:00:52 crc kubenswrapper[4900]: >
Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.048269 4900 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-7xgpz container/registry namespace/openshift-image-registry: Liveness probe status=failure output="Get \"https://10.217.0.65:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.048354 4900 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-7xgpz container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.65:5000/healthz\": context deadline exceeded" start-of-body=
deadline exceeded" start-of-body= Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.048380 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" podUID="286b2993-6426-49f6-8b5a-84a2289602a3" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.65:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.049254 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-66df7c8f76-7xgpz" podUID="286b2993-6426-49f6-8b5a-84a2289602a3" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.65:5000/healthz\": context deadline exceeded" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.057423 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podUID="4967ec79-a9dd-438a-9cb7-b89b3af09ff5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.123160 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" podUID="65b8356b-f64f-4cb8-94af-6b8d45448a63" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.134183 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.305433 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" podUID="76f1d09b-01aa-4c81-b568-8ffb58182475" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.305434 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-7d75bc88d5-6xhcl" podUID="76f1d09b-01aa-4c81-b568-8ffb58182475" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.346951 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" podUID="70b6c48f-4c95-468f-a792-abe4e318948f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.476274 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podUID="1759ba9c-7c4a-4380-81f5-e67d8e418fa1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 
14:00:53.578629 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" podUID="f3cc1727-3d00-43f4-92c3-5ef428297727" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.578876 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.794254 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="f3eb0985-f56f-4111-86c6-d511433058c0" containerName="prometheus" probeResult="failure" output="command timed out" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.796928 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.797119 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-galera-0" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.797323 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="f3eb0985-f56f-4111-86c6-d511433058c0" containerName="prometheus" probeResult="failure" output="command timed out" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.799925 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.800074 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 27 14:00:53 crc kubenswrapper[4900]: I0127 14:00:53.801991 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"6fdd71884020c539bd9777c35cbe22887618ed748b95b5f0a35e3375e433dbda"} pod="openstack/openstack-galera-0" containerMessage="Container galera failed liveness probe, will be restarted" Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.040288 4900 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-82kkf container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.87:9443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.040892 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" podUID="b1058137-9f30-4107-a5a2-1a2edf16cbce" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.87:9443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.041014 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.041717 4900 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.041810 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.042723 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.093969 4900 generic.go:334] "Generic (PLEG): container finished" podID="8031fe9b-6753-4ab7-abac-fece10fd066b" containerID="3ad00e407836ccd2af15463c034144967f0a9d4ad31f058f57d1fbd26f5b6db0" exitCode=0 Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.094141 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" event={"ID":"8031fe9b-6753-4ab7-abac-fece10fd066b","Type":"ContainerDied","Data":"3ad00e407836ccd2af15463c034144967f0a9d4ad31f058f57d1fbd26f5b6db0"} Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.108755 4900 generic.go:334] "Generic (PLEG): container finished" podID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerID="58cc2bb3c4ced73464022004c5bc29b75033657c8b232b10549b81b5ebe2e2fb" exitCode=0 Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.109007 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerDied","Data":"58cc2bb3c4ced73464022004c5bc29b75033657c8b232b10549b81b5ebe2e2fb"} Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.113415 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" event={"ID":"665d09a8-9764-4b5d-975e-5c96fd671dd5","Type":"ContainerStarted","Data":"96e4b7aadb4a8925d02102917b573e247320622fa25d47c806c8a8df896058a6"} Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.113824 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.115112 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" start-of-body= Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.115193 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.275826 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.276649 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.287580 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.287891 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.56:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.396368 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podUID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.396526 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podUID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.396633 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.467651 4900 trace.go:236] Trace[737366910]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-server-0" (27-Jan-2026 14:00:49.215) (total time: 5244ms):
Jan 27 14:00:54 crc kubenswrapper[4900]: Trace[737366910]: [5.24464908s] [5.24464908s] END
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.467655 4900 trace.go:236] Trace[1346926128]: "Calculate volume metrics of storage for pod minio-dev/minio" (27-Jan-2026 14:00:48.112) (total time: 6348ms):
Jan 27 14:00:54 crc kubenswrapper[4900]: Trace[1346926128]: [6.348168962s] [6.348168962s] END
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.621586 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" podUID="f3cc1727-3d00-43f4-92c3-5ef428297727" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.755557 4900 patch_prober.go:28] interesting pod/metrics-server-7dbbbb77f-fjj4n container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.82:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.755632 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" podUID="61ef39b0-502c-45d5-be3a-e11c6ae19d59" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.82:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.755712 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.769741 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="metrics-server" containerStatusID={"Type":"cri-o","ID":"99bf9c8acda5d1f77bd4f37089b3e7e853fbbc968d684f9f2b5e55f9baf527a1"} pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" containerMessage="Container metrics-server failed liveness probe, will be restarted"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.769832 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" podUID="61ef39b0-502c-45d5-be3a-e11c6ae19d59" containerName="metrics-server" containerID="cri-o://99bf9c8acda5d1f77bd4f37089b3e7e853fbbc968d684f9f2b5e55f9baf527a1" gracePeriod=170
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.795904 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.800518 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.805373 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.805457 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.852489 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.852707 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-8mc5v"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.853248 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.853379 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/speaker-8mc5v"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.853953 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.860178 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="speaker" containerStatusID={"Type":"cri-o","ID":"fb9c2c3038b700c46aa56a66aaabfe390e5bae0b6bba2f6097e740ccc01e07f5"} pod="metallb-system/speaker-8mc5v" containerMessage="Container speaker failed liveness probe, will be restarted"
Jan 27 14:00:54 crc kubenswrapper[4900]: I0127 14:00:54.860303 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" containerID="cri-o://fb9c2c3038b700c46aa56a66aaabfe390e5bae0b6bba2f6097e740ccc01e07f5" gracePeriod=2
Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.045533 4900 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-82kkf container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.87:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.046213 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" podUID="b1058137-9f30-4107-a5a2-1a2edf16cbce" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.87:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.088471 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.154116 4900 patch_prober.go:28] interesting pod/monitoring-plugin-66c88bc574-zpdfk container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.83:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.154203 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" podUID="654e1706-9908-41f6-ba56-90e58ff3f665" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.83:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.154776 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk"
Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.162395 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402"} pod="openstack/openstack-cell1-galera-0" containerMessage="Container galera failed liveness probe, will be restarted"
containerName="galera" containerStatusID={"Type":"cri-o","ID":"dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402"} pod="openstack/openstack-cell1-galera-0" containerMessage="Container galera failed liveness probe, will be restarted" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.162663 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.162748 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.450528 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.451077 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.450595 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.453885 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.453963 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.465077 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="route-controller-manager" containerStatusID={"Type":"cri-o","ID":"b30dd8bf02f344b723a3495fa66d536ba9a7ec093b091a8abbd6fa5edee17924"} pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" containerMessage="Container route-controller-manager failed liveness probe, will be restarted" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.465176 4900 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" containerID="cri-o://b30dd8bf02f344b723a3495fa66d536ba9a7ec093b091a8abbd6fa5edee17924" gracePeriod=30 Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.753481 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/community-operators-zkzmp" podUID="008df9f5-f660-4c50-b9d1-adf18fa073d1" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:55 crc kubenswrapper[4900]: timeout: health rpc did not complete within 1s Jan 27 14:00:55 crc kubenswrapper[4900]: > Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.757473 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/community-operators-zkzmp" podUID="008df9f5-f660-4c50-b9d1-adf18fa073d1" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:55 crc kubenswrapper[4900]: timeout: health rpc did not complete within 1s Jan 27 14:00:55 crc kubenswrapper[4900]: > Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.794135 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.821826 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.821922 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.821929 4900 patch_prober.go:28] interesting pod/console-operator-58897d9998-x96hr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.822093 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x96hr" podUID="a3973a7c-f509-4769-93a6-4f71f99cc515" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.965661 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 
14:00:55.965748 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.965850 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.965902 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.965669 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.965900 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.966209 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.967906 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="router" containerStatusID={"Type":"cri-o","ID":"8563ab30e2521d59b7bc253b46819ad4ef121d681d3c7cebdd6efe4151e7b324"} pod="openshift-ingress/router-default-5444994796-zvswh" containerMessage="Container router failed liveness probe, will be restarted" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.968045 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" containerID="cri-o://8563ab30e2521d59b7bc253b46819ad4ef121d681d3c7cebdd6efe4151e7b324" gracePeriod=10 Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.976587 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.976685 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.976778 4900 kubelet.go:2542] 
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.976818 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.976904 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.977080 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.979618 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="catalog-operator" containerStatusID={"Type":"cri-o","ID":"defe4930cbdb8606dcabcddc515864def198ecf1db8f16ae3ca2fa48dee88898"} pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" containerMessage="Container catalog-operator failed liveness probe, will be restarted" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.979852 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" containerID="cri-o://defe4930cbdb8606dcabcddc515864def198ecf1db8f16ae3ca2fa48dee88898" gracePeriod=30 Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.994004 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.994115 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.995012 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" start-of-body= Jan 27 14:00:55 crc kubenswrapper[4900]: I0127 14:00:55.995363 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": dial tcp 10.217.0.27:5443: connect: connection refused" Jan 27 
14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.072754 4900 prober.go:107] "Probe failed" probeType="Startup" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.113346 4900 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-cbtnw container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.22:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.113471 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-cbtnw" podUID="b925e9c1-ac78-41d5-a783-88a95ae66df6" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.22:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.156389 4900 patch_prober.go:28] interesting pod/monitoring-plugin-66c88bc574-zpdfk container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.83:9443/health\": context deadline exceeded" start-of-body= Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.156504 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" podUID="654e1706-9908-41f6-ba56-90e58ff3f665" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.83:9443/health\": context deadline exceeded" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.193293 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-gnhhx" event={"ID":"ec5f276d-77b1-4fa8-b00b-7230c546a47f","Type":"ContainerStarted","Data":"3e61df8853653997fd5229a9fe845b5756f4fbfe059e4d251ecbde9791adb911"} Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.194146 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-gnhhx" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.197509 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" event={"ID":"8031fe9b-6753-4ab7-abac-fece10fd066b","Type":"ContainerStarted","Data":"a601d8a79f2dd660ffdcc7cc365a939e02830f34134765a6312fe579f1e42f54"} Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.197804 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.390214 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.166:9090/-/healthy\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.390426 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.166:9090/-/ready\": net/http: request canceled 
while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.794128 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.881555 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.881583 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.881714 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.881802 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.881847 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.881881 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.884201 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="olm-operator" containerStatusID={"Type":"cri-o","ID":"af8b4577d6aed3196e9a4ed07893dfe9678cfa0e73342e6e1f76f17118c3691a"} pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" containerMessage="Container olm-operator failed liveness probe, will be restarted" Jan 27 14:00:56 crc kubenswrapper[4900]: I0127 14:00:56.884259 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" containerID="cri-o://af8b4577d6aed3196e9a4ed07893dfe9678cfa0e73342e6e1f76f17118c3691a" gracePeriod=30 Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.012374 4900 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-hffvz container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure 
output="Get \"http://10.217.0.23:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.012770 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" podUID="db5a985c-5b4c-4ab5-ab7c-61b356b88494" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.053364 4900 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-hffvz container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.053458 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.053471 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hffvz" podUID="db5a985c-5b4c-4ab5-ab7c-61b356b88494" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.053561 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.053585 4900 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.053370 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.053831 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.053814 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.054003 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.090124 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-nlrz5" podUID="ddf9e91c-0239-4d06-af1e-9ef7d22e048a" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.441449 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": context deadline exceeded" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.441871 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": context deadline exceeded" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.441971 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.442095 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.442169 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.484461 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller-manager" containerStatusID={"Type":"cri-o","ID":"83260d35c539356d18080818b54f5518d5f43ec2c8600fdc94fc6d9d1b832c94"} pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" containerMessage="Container controller-manager failed liveness probe, will be restarted" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.484547 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" containerID="cri-o://83260d35c539356d18080818b54f5518d5f43ec2c8600fdc94fc6d9d1b832c94" gracePeriod=30 Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.565777 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.623017 4900 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.623135 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.623213 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.627778 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-scheduler" containerStatusID={"Type":"cri-o","ID":"1e78718c5347036ebcc7ca0f1b05b47f2dfbff5227c83fac9c7dee20c039c65c"} pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" containerMessage="Container kube-scheduler failed liveness probe, will be restarted" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.628004 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" containerID="cri-o://1e78718c5347036ebcc7ca0f1b05b47f2dfbff5227c83fac9c7dee20c039c65c" gracePeriod=30 Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.693202 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:00:57 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:00:57 crc kubenswrapper[4900]: > Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.806581 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" probeResult="failure" output="command timed out" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.807105 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.806665 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" probeResult="failure" output="command timed out" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.807631 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.809534 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048"} 
pod="openstack-operators/openstack-operator-index-xvszg" containerMessage="Container registry-server failed liveness probe, will be restarted" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.809597 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" containerID="cri-o://3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048" gracePeriod=30 Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.849207 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.849417 4900 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": dial tcp 192.168.126.11:10259: connect: connection refused" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.849470 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": dial tcp 192.168.126.11:10259: connect: connection refused" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.942141 4900 patch_prober.go:28] interesting pod/console-7f9b6cf6cc-5nhbt container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.942262 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-7f9b6cf6cc-5nhbt" podUID="57b93bd3-2b4f-45f5-9691-4b3f553c1c13" containerName="console" probeResult="failure" output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:57 crc kubenswrapper[4900]: I0127 14:00:57.942417 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.189293 4900 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-k62tn container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.189406 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" podUID="bf3e0b5e-77aa-4f51-9cca-149e20525f8f" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.189565 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.228743 4900 generic.go:334] "Generic (PLEG): container finished" 
podID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerID="defe4930cbdb8606dcabcddc515864def198ecf1db8f16ae3ca2fa48dee88898" exitCode=0 Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.228841 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" event={"ID":"398f26a6-4944-4c9b-926a-b4ef22eb2a1f","Type":"ContainerDied","Data":"defe4930cbdb8606dcabcddc515864def198ecf1db8f16ae3ca2fa48dee88898"} Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.232177 4900 generic.go:334] "Generic (PLEG): container finished" podID="7513569c-d113-4de0-8d1c-734db1c14659" containerID="fb9c2c3038b700c46aa56a66aaabfe390e5bae0b6bba2f6097e740ccc01e07f5" exitCode=137 Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.232213 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-8mc5v" event={"ID":"7513569c-d113-4de0-8d1c-734db1c14659","Type":"ContainerDied","Data":"fb9c2c3038b700c46aa56a66aaabfe390e5bae0b6bba2f6097e740ccc01e07f5"} Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.357493 4900 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-rg7hv container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.357607 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" podUID="f1cfe76c-2aba-4da6-a7a7-fa01e883cb60" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.53:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.357757 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.396329 4900 patch_prober.go:28] interesting pod/loki-operator-controller-manager-849c99c676-jbpgt container/manager namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.47:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.396416 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-849c99c676-jbpgt" podUID="386bc10f-9e5d-49d0-9906-e97f1796d49d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.47:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.441745 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.442114 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" 
probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.442239 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.446369 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.446470 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.446550 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.460390 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="prometheus-operator-admission-webhook" containerStatusID={"Type":"cri-o","ID":"7a6d7c54d44701c25092d982003a1fcce8badc067d63a3f8fbdce05e3476b742"} pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" containerMessage="Container prometheus-operator-admission-webhook failed liveness probe, will be restarted" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.460497 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" containerID="cri-o://7a6d7c54d44701c25092d982003a1fcce8badc067d63a3f8fbdce05e3476b742" gracePeriod=30 Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.490797 4900 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-mlj7c container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.490866 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" podUID="62fdb605-a4e3-443d-9887-1ebc8218908f" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.54:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.555709 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.689697 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-logging/logging-loki-query-frontend-69d9546745-mlj7c" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.763651 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-76788598db-rg7hv" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.766908 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-k62tn" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.796267 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="f3eb0985-f56f-4111-86c6-d511433058c0" containerName="prometheus" probeResult="failure" output="command timed out" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.798466 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="f3eb0985-f56f-4111-86c6-d511433058c0" containerName="prometheus" probeResult="failure" output="command timed out" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.798629 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.957519 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx" podUID="c35e074b-0e8e-4d1f-8d2c-5c23cf320f25" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.958106 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-687f57d79b-4f2kx" podUID="c35e074b-0e8e-4d1f-8d2c-5c23cf320f25" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.958239 4900 patch_prober.go:28] interesting pod/console-7f9b6cf6cc-5nhbt container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:58 crc kubenswrapper[4900]: I0127 14:00:58.958274 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-7f9b6cf6cc-5nhbt" podUID="57b93bd3-2b4f-45f5-9691-4b3f553c1c13" containerName="console" probeResult="failure" output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.275571 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.275682 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout 
exceeded while awaiting headers)" Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.276182 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.276203 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.282237 4900 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="1e78718c5347036ebcc7ca0f1b05b47f2dfbff5227c83fac9c7dee20c039c65c" exitCode=0 Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.282308 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"1e78718c5347036ebcc7ca0f1b05b47f2dfbff5227c83fac9c7dee20c039c65c"} Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.293246 4900 generic.go:334] "Generic (PLEG): container finished" podID="9a6a8e52-38d8-41a6-863f-78255609c063" containerID="af8b4577d6aed3196e9a4ed07893dfe9678cfa0e73342e6e1f76f17118c3691a" exitCode=0 Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.295149 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" event={"ID":"9a6a8e52-38d8-41a6-863f-78255609c063","Type":"ContainerDied","Data":"af8b4577d6aed3196e9a4ed07893dfe9678cfa0e73342e6e1f76f17118c3691a"} Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.298986 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.299080 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.56:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.299292 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.56:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.299428 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.56:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.361502 4900 patch_prober.go:28] 
interesting pod/etcd-crc container/etcd namespace/openshift-etcd: Liveness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=failed to establish etcd client: giving up getting a cached client after 3 tries Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.361598 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-etcd/etcd-crc" podUID="2139d3e2895fc6797b9c76a1b4c9886d" containerName="etcd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.446317 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.446431 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.481978 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 14:00:59 crc kubenswrapper[4900]: I0127 14:00:59.644457 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-694495f969-v6psl" podUID="88d8052f-1988-4229-abc5-100335ed01e2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:00:59.996436 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podUID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.93:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:00:59.996609 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:00:59.996902 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podUID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.93:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:00:59.997009 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.006905 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="webhook-server" 
containerStatusID={"Type":"cri-o","ID":"c9aaccf51243d1ad8d169bde83a0f99376ea5cf45a09ecc63fad8e379a6a51e5"} pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" containerMessage="Container webhook-server failed liveness probe, will be restarted" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.007287 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podUID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerName="webhook-server" containerID="cri-o://c9aaccf51243d1ad8d169bde83a0f99376ea5cf45a09ecc63fad8e379a6a51e5" gracePeriod=2 Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.262371 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.274404 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zmqn9 container/opa namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.55:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.274499 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zmqn9" podUID="4de6c1e3-c4c6-47f9-951f-b07adc7744cf" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.296778 4900 patch_prober.go:28] interesting pod/logging-loki-gateway-7dbfd5bb68-zslxm container/opa namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.56:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.296865 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-7dbfd5bb68-zslxm" podUID="8d06d09a-f602-4b44-a4d0-2566d02321df" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.56:8083/live\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.330028 4900 generic.go:334] "Generic (PLEG): container finished" podID="780131af-30a7-406a-8ae9-b9a3a0826d1e" containerID="7b6ea029f6d464c8d975de774809f4a8ad2d400f9a65de460b9211083ded43f3" exitCode=0 Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.330152 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"780131af-30a7-406a-8ae9-b9a3a0826d1e","Type":"ContainerDied","Data":"7b6ea029f6d464c8d975de774809f4a8ad2d400f9a65de460b9211083ded43f3"} Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.448736 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.550030 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.699014 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" Jan 27 14:01:00 crc kubenswrapper[4900]: I0127 14:01:00.781638 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:00 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:00 crc kubenswrapper[4900]: > Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.040014 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" podUID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.93:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.081553 4900 prober.go:107] "Probe failed" probeType="Startup" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.163938 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.273361 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-768b776ffb-2cndf" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.362160 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.379050 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" event={"ID":"398f26a6-4944-4c9b-926a-b4ef22eb2a1f","Type":"ContainerStarted","Data":"17b3da1e2253dcd9feb2468bf83127372a66140902f1ddc8f475f1365a3a7940"} Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.379540 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.380036 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.380124 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.391905 4900 generic.go:334] "Generic (PLEG): container finished" podID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerID="7a6d7c54d44701c25092d982003a1fcce8badc067d63a3f8fbdce05e3476b742" exitCode=0 Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 
14:01:01.391997 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" event={"ID":"0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677","Type":"ContainerDied","Data":"7a6d7c54d44701c25092d982003a1fcce8badc067d63a3f8fbdce05e3476b742"} Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.396579 4900 generic.go:334] "Generic (PLEG): container finished" podID="147d12f6-3180-41d8-92c9-55aab763d313" containerID="3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048" exitCode=0 Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.396674 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xvszg" event={"ID":"147d12f6-3180-41d8-92c9-55aab763d313","Type":"ContainerDied","Data":"3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048"} Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.402798 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" event={"ID":"9a6a8e52-38d8-41a6-863f-78255609c063","Type":"ContainerStarted","Data":"d0073a6e2b6e08631014a1fe74a673064190222673907e3c816ce5eda9a5362a"} Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.404411 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.404485 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.404509 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.416137 4900 generic.go:334] "Generic (PLEG): container finished" podID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerID="83260d35c539356d18080818b54f5518d5f43ec2c8600fdc94fc6d9d1b832c94" exitCode=0 Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.416214 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" event={"ID":"bffb26f0-0279-4ad6-ba95-920c7a358068","Type":"ContainerDied","Data":"83260d35c539356d18080818b54f5518d5f43ec2c8600fdc94fc6d9d1b832c94"} Jan 27 14:01:01 crc kubenswrapper[4900]: E0127 14:01:01.478656 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf34c04d_2e4e_47f7_919b_5b56554880dc.slice/crio-b30dd8bf02f344b723a3495fa66d536ba9a7ec093b091a8abbd6fa5edee17924.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf34c04d_2e4e_47f7_919b_5b56554880dc.slice/crio-conmon-b30dd8bf02f344b723a3495fa66d536ba9a7ec093b091a8abbd6fa5edee17924.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod147d12f6_3180_41d8_92c9_55aab763d313.slice/crio-conmon-3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048.scope\": RecentStats: unable to find data in memory cache]" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.595263 4900 trace.go:236] Trace[2010430979]: "Calculate volume metrics of ovndbcluster-sb-etc-ovn for pod openstack/ovsdbserver-sb-0" (27-Jan-2026 14:00:57.170) (total time: 4417ms): Jan 27 14:01:01 crc kubenswrapper[4900]: Trace[2010430979]: [4.41780526s] [4.41780526s] END Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.655823 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.656142 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" podUID="e8b4a268-6430-4f23-bd93-aa62b52710a6" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.656687 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.658309 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.671903 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Jan 27 14:01:01 crc kubenswrapper[4900]: I0127 14:01:01.842100 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" Jan 27 14:01:02 crc kubenswrapper[4900]: E0127 14:01:02.074660 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048 is running failed: container process not found" containerID="3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 14:01:02 crc kubenswrapper[4900]: E0127 14:01:02.075621 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048 is running failed: container process not found" containerID="3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 14:01:02 crc kubenswrapper[4900]: E0127 14:01:02.076349 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048 is running failed: container process not found" containerID="3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048" 
cmd=["grpc_health_probe","-addr=:50051"] Jan 27 14:01:02 crc kubenswrapper[4900]: E0127 14:01:02.076465 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3afdbf8c69f56abc453426e1bfc981457680c7d1d0754b2efd44dd774a189048 is running failed: container process not found" probeType="Readiness" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.150613 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-rdllj" Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.461587 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" event={"ID":"0d203d65-c23c-4e25-b72b-7b5a69441b5f","Type":"ContainerDied","Data":"c9aaccf51243d1ad8d169bde83a0f99376ea5cf45a09ecc63fad8e379a6a51e5"} Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.461534 4900 generic.go:334] "Generic (PLEG): container finished" podID="0d203d65-c23c-4e25-b72b-7b5a69441b5f" containerID="c9aaccf51243d1ad8d169bde83a0f99376ea5cf45a09ecc63fad8e379a6a51e5" exitCode=0 Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.513499 4900 generic.go:334] "Generic (PLEG): container finished" podID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerID="b30dd8bf02f344b723a3495fa66d536ba9a7ec093b091a8abbd6fa5edee17924" exitCode=0 Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.521181 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.521260 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.537445 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.549142 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 27 14:01:02 crc kubenswrapper[4900]: I0127 14:01:02.539107 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:02 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:02 crc kubenswrapper[4900]: > Jan 27 14:01:03 crc kubenswrapper[4900]: 
I0127 14:01:02.752992 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" containerID="cri-o://dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402" gracePeriod=23 Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:02.817513 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" containerID="cri-o://6fdd71884020c539bd9777c35cbe22887618ed748b95b5f0a35e3375e433dbda" gracePeriod=21 Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.234256 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-d8fd5ccf5-5h9ll" Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.234715 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" event={"ID":"af34c04d-2e4e-47f7-919b-5b56554880dc","Type":"ContainerDied","Data":"b30dd8bf02f344b723a3495fa66d536ba9a7ec093b091a8abbd6fa5edee17924"} Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.239509 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-82kkf" Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.477573 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.690339 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" event={"ID":"0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677","Type":"ContainerStarted","Data":"2dba9d7bc7e385c683cff166736add10fa94d9514b4ac7268614fc09e8cb3473"} Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.696033 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": dial tcp 10.217.0.75:8443: connect: connection refused" start-of-body= Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.696164 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": dial tcp 10.217.0.75:8443: connect: connection refused" Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.697640 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.750999 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" event={"ID":"bffb26f0-0279-4ad6-ba95-920c7a358068","Type":"ContainerStarted","Data":"f98d961130efc4b62b343bf3ff67755daa6c764a75352033a80d3d9434cfad85"} Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.765569 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.765786 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: connect: connection refused" start-of-body= Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.765834 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: connect: connection refused" Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.780454 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-8mc5v" podUID="7513569c-d113-4de0-8d1c-734db1c14659" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": dial tcp [::1]:29150: connect: connection refused" Jan 27 14:01:03 crc kubenswrapper[4900]: E0127 14:01:03.780473 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:03 crc kubenswrapper[4900]: E0127 14:01:03.784518 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:03 crc kubenswrapper[4900]: E0127 14:01:03.798416 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:03 crc kubenswrapper[4900]: E0127 14:01:03.798562 4900 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" Jan 27 14:01:03 crc kubenswrapper[4900]: I0127 14:01:03.820891 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.456564 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": dial tcp 10.217.0.66:8443: connect: connection refused" start-of-body= Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.457492 4900 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": dial tcp 10.217.0.66:8443: connect: connection refused" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.548950 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="6bf50105-b28f-4123-a6f8-75124e213fcc" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.737245 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-66c88bc574-zpdfk" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.947368 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"04f6cdbfb6335668f9c8c5c6da1cf2fd0ff77d1173f3e3c1e145b2dee9fecc06"} Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.952036 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]backend-http ok Jan 27 14:01:04 crc kubenswrapper[4900]: [+]has-synced ok Jan 27 14:01:04 crc kubenswrapper[4900]: [-]process-running failed: reason withheld Jan 27 14:01:04 crc kubenswrapper[4900]: healthz check failed Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.952134 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.953575 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.977664 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.977739 4900 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zdrjw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.977746 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.977774 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" podUID="398f26a6-4944-4c9b-926a-b4ef22eb2a1f" 
containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.982767 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" event={"ID":"af34c04d-2e4e-47f7-919b-5b56554880dc","Type":"ContainerStarted","Data":"cacf97c3d475ad0191e710d5df09f00295ade7a2aa5745d1f08f9ca705e576b8"} Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.982864 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.983423 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": dial tcp 10.217.0.66:8443: connect: connection refused" start-of-body= Jan 27 14:01:04 crc kubenswrapper[4900]: I0127 14:01:04.983495 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": dial tcp 10.217.0.66:8443: connect: connection refused" Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.106423 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.188736 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-gnhhx" Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.238725 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-gnhhx" Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.239685 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-8mc5v" event={"ID":"7513569c-d113-4de0-8d1c-734db1c14659","Type":"ContainerStarted","Data":"f52364ac0adde77a55b89bae7b4ebcfe2db86a624d3e6c49205bc5db92b79764"} Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.244895 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-8mc5v" Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.248590 4900 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-rds4l container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:8443/healthz\": dial tcp 10.217.0.75:8443: connect: connection refused" start-of-body= Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.248706 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" podUID="0ccb2369-fae3-4dbd-8a7c-0e0d49bd8677" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.75:8443/healthz\": dial tcp 10.217.0.75:8443: connect: connection refused" Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.268700 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager 
namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: connect: connection refused" start-of-body= Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.268772 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: connect: connection refused" Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.938723 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.939627 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.943136 4900 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-jnwth container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Jan 27 14:01:05 crc kubenswrapper[4900]: I0127 14:01:05.943283 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" podUID="9a6a8e52-38d8-41a6-863f-78255609c063" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.292876 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xvszg" event={"ID":"147d12f6-3180-41d8-92c9-55aab763d313","Type":"ContainerStarted","Data":"8ba95067ba6aa20d8a75b786d45b92cfb2f2d40554558f12aa9da65c288876d9"} Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.323371 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" event={"ID":"0d203d65-c23c-4e25-b72b-7b5a69441b5f","Type":"ContainerStarted","Data":"b600ff63f7195989c4b45c00b6c574f2e832175c2ff0d0dcc70d46601c8ad314"} Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.388763 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ingress_router-default-5444994796-zvswh_0703acf1-af71-4249-b1ef-e19c6beb4d86/router/0.log" Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.389420 4900 generic.go:334] "Generic (PLEG): container finished" podID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerID="8563ab30e2521d59b7bc253b46819ad4ef121d681d3c7cebdd6efe4151e7b324" exitCode=137 Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.395417 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zvswh" 
event={"ID":"0703acf1-af71-4249-b1ef-e19c6beb4d86","Type":"ContainerDied","Data":"8563ab30e2521d59b7bc253b46819ad4ef121d681d3c7cebdd6efe4151e7b324"} Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.416080 4900 patch_prober.go:28] interesting pod/route-controller-manager-7b4c94f6f7-5v8z8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.66:8443/healthz\": dial tcp 10.217.0.66:8443: connect: connection refused" start-of-body= Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.416197 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" podUID="af34c04d-2e4e-47f7-919b-5b56554880dc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.66:8443/healthz\": dial tcp 10.217.0.66:8443: connect: connection refused" Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.498739 4900 patch_prober.go:28] interesting pod/controller-manager-866c485c84-8p95x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: connect: connection refused" start-of-body= Jan 27 14:01:06 crc kubenswrapper[4900]: I0127 14:01:06.498805 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" podUID="bffb26f0-0279-4ad6-ba95-920c7a358068" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.67:8443/healthz\": dial tcp 10.217.0.67:8443: connect: connection refused" Jan 27 14:01:07 crc kubenswrapper[4900]: I0127 14:01:06.994719 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:01:07 crc kubenswrapper[4900]: I0127 14:01:06.995681 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:07 crc kubenswrapper[4900]: I0127 14:01:07.013500 4900 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-hn2jw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 27 14:01:07 crc kubenswrapper[4900]: I0127 14:01:07.013601 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" podUID="665d09a8-9764-4b5d-975e-5c96fd671dd5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.27:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:07 crc kubenswrapper[4900]: I0127 14:01:07.038005 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7f9b6cf6cc-5nhbt" Jan 27 
14:01:07 crc kubenswrapper[4900]: I0127 14:01:07.321032 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="6bf50105-b28f-4123-a6f8-75124e213fcc" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 14:01:07 crc kubenswrapper[4900]: I0127 14:01:07.468010 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-rds4l" Jan 27 14:01:07 crc kubenswrapper[4900]: I0127 14:01:07.735809 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:07 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:07 crc kubenswrapper[4900]: > Jan 27 14:01:08 crc kubenswrapper[4900]: I0127 14:01:08.585452 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"780131af-30a7-406a-8ae9-b9a3a0826d1e","Type":"ContainerStarted","Data":"bcd8cdce38bcb478b796d0cf61d133d5e80bfc739e76b386e072b8d44542cafd"} Jan 27 14:01:09 crc kubenswrapper[4900]: I0127 14:01:09.798129 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="780131af-30a7-406a-8ae9-b9a3a0826d1e" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Jan 27 14:01:09 crc kubenswrapper[4900]: I0127 14:01:09.892816 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ingress_router-default-5444994796-zvswh_0703acf1-af71-4249-b1ef-e19c6beb4d86/router/0.log" Jan 27 14:01:09 crc kubenswrapper[4900]: I0127 14:01:09.893263 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zvswh" event={"ID":"0703acf1-af71-4249-b1ef-e19c6beb4d86","Type":"ContainerStarted","Data":"537adffc7d16558f4edff57071512e2ee97e38f18b5f725d3ef825b484cd8862"} Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.100131 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="6bf50105-b28f-4123-a6f8-75124e213fcc" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.100523 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.123656 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-scheduler" containerStatusID={"Type":"cri-o","ID":"af3c570545cf826144035311b08eba9b878bb342fee4e73f72daffd5d60b8907"} pod="openstack/cinder-scheduler-0" containerMessage="Container cinder-scheduler failed liveness probe, will be restarted" Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.137856 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="6bf50105-b28f-4123-a6f8-75124e213fcc" containerName="cinder-scheduler" containerID="cri-o://af3c570545cf826144035311b08eba9b878bb342fee4e73f72daffd5d60b8907" gracePeriod=30 Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.202583 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-9blf4" Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.838257 
4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.886081 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.886250 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.935598 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-chtfl_ac68471d-4581-477c-bcff-415b8f8ea21e/registry-server/2.log" Jan 27 14:01:10 crc kubenswrapper[4900]: I0127 14:01:10.980001 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerDied","Data":"edf34d1f731a0ed88ee92135cb225664c8ae3dfdfe5de507d718d31c06dfc817"} Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:10.997410 4900 generic.go:334] "Generic (PLEG): container finished" podID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerID="edf34d1f731a0ed88ee92135cb225664c8ae3dfdfe5de507d718d31c06dfc817" exitCode=137 Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.028683 4900 scope.go:117] "RemoveContainer" containerID="a8487f10c83c07590622fc9dd4f16f46872ef36a132cd0cdf5124d0695609b41" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.102400 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-gnhhx" podUID="ec5f276d-77b1-4fa8-b00b-7230c546a47f" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.206588 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-655bf9cfbb-7wh5z" podUID="1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.241266 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-77554cdc5c-jxgdh" podUID="b094071d-c368-40e6-8515-a17d0a22a868" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.493727 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-65ff799cfd-z72lx" podUID="e3cba13f-5396-4c71-8f81-d2d932baca1f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.543121 4900 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-67dd55ff59-dl6ln" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.572964 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-575ffb885b-wvhzp" podUID="5155088c-b873-4fac-b1e9-87f57c2fae68" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.104:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.681492 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-82q5c" podUID="175105c2-dfc2-4752-bf75-a027d86dc373" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.735411 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-55f684fd56-ns96g" podUID="e8365f5d-b2f2-4cab-a803-e722c65ae307" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.800597 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r" podUID="a6dce274-9090-44fc-ac6b-6e164e5b7192" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.845630 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.845749 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.938295 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-849fcfbb6b-9m826" podUID="4967ec79-a9dd-438a-9cb7-b89b3af09ff5" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:11 crc kubenswrapper[4900]: I0127 14:01:11.938370 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-7ffd8d76d4-xfh5t" podUID="5104a740-a23d-4ea4-a186-97768d490075" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.056207 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 
14:01:12.056676 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.077343 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-fbd766fb6-25hgc" podUID="a988e8ab-311d-4b6a-a75e-c49601a77d46" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.120640 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-7875d7675-j95fk" podUID="65b8356b-f64f-4cb8-94af-6b8d45448a63" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.223013 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-fkq26" podUID="0c0782a0-6d83-4760-82dd-cea358647713" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.390531 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-dkghw" podUID="5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.390711 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-659968c8f5-zkwns" podUID="1759ba9c-7c4a-4380-81f5-e67d8e418fa1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.448380 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-krlqc" podUID="5d4cc48d-12ab-458e-bf29-bc87a182f5c3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.493370 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-7579fb95dd-5zsrz" podUID="be0258a0-aba9-4900-b507-4767b2726a69" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:12 crc kubenswrapper[4900]: E0127 14:01:12.495093 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6fdd71884020c539bd9777c35cbe22887618ed748b95b5f0a35e3375e433dbda" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:12 crc kubenswrapper[4900]: E0127 14:01:12.504619 4900 log.go:32] "ExecSync cmd from 
runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6fdd71884020c539bd9777c35cbe22887618ed748b95b5f0a35e3375e433dbda" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:12 crc kubenswrapper[4900]: E0127 14:01:12.513211 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6fdd71884020c539bd9777c35cbe22887618ed748b95b5f0a35e3375e433dbda" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:12 crc kubenswrapper[4900]: E0127 14:01:12.513359 4900 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="7626ad91-9f29-4dae-969a-e23d420319ac" containerName="galera" Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.840567 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 14:01:12 crc kubenswrapper[4900]: I0127 14:01:12.840694 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 14:01:13 crc kubenswrapper[4900]: E0127 14:01:13.732578 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:13 crc kubenswrapper[4900]: E0127 14:01:13.736822 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:13 crc kubenswrapper[4900]: E0127 14:01:13.832197 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 27 14:01:13 crc kubenswrapper[4900]: E0127 14:01:13.832406 4900 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" Jan 27 14:01:13 crc kubenswrapper[4900]: I0127 14:01:13.842351 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router 
namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 14:01:13 crc kubenswrapper[4900]: I0127 14:01:13.842473 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.034677 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.034837 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.035517 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.237212 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"f6f5f302e7703bf610746eb0aefa198f328bdd65009e6ebf2fb182c19e425e5f"} pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" containerMessage="Container manager failed liveness probe, will be restarted" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.237304 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" podUID="899811c4-fce0-42df-b3e7-9b1495cad676" containerName="manager" containerID="cri-o://f6f5f302e7703bf610746eb0aefa198f328bdd65009e6ebf2fb182c19e425e5f" gracePeriod=10 Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.393441 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podUID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.394149 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podUID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.394201 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 14:01:14 crc kubenswrapper[4900]: 
I0127 14:01:14.476128 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:14 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:14 crc kubenswrapper[4900]: > Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.476909 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="manager" containerStatusID={"Type":"cri-o","ID":"34a9594c6ae90d00461e21b81bedad93da2bc761e8d58e4762f7d4aa27511d4a"} pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" containerMessage="Container manager failed liveness probe, will be restarted" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.477043 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" podUID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerName="manager" containerID="cri-o://34a9594c6ae90d00461e21b81bedad93da2bc761e8d58e4762f7d4aa27511d4a" gracePeriod=10 Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.482750 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-chtfl_ac68471d-4581-477c-bcff-415b8f8ea21e/registry-server/2.log" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.787986 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7b4c94f6f7-5v8z8" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.788697 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-8mc5v" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.788724 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29492041-82dpf"] Jan 27 14:01:14 crc kubenswrapper[4900]: E0127 14:01:14.826909 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="582d5425-f779-4617-843e-f68dcfa2e79d" containerName="collect-profiles" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.826974 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="582d5425-f779-4617-843e-f68dcfa2e79d" containerName="collect-profiles" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.849565 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.849671 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 14:01:14 crc kubenswrapper[4900]: I0127 14:01:14.917783 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="582d5425-f779-4617-843e-f68dcfa2e79d" containerName="collect-profiles" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.031731 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.056824 4900 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.148310 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zdrjw" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.274134 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-combined-ca-bundle\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.292795 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-config-data\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.293409 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmnvg\" (UniqueName: \"kubernetes.io/projected/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-kube-api-access-xmnvg\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.293515 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-fernet-keys\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.404583 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-config-data\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.409514 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmnvg\" (UniqueName: \"kubernetes.io/projected/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-kube-api-access-xmnvg\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.409636 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-fernet-keys\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.409810 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-combined-ca-bundle\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 
14:01:15.524207 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29492041-82dpf"] Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.534759 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-config-data\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.577595 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-combined-ca-bundle\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.580803 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-fernet-keys\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.581455 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmnvg\" (UniqueName: \"kubernetes.io/projected/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-kube-api-access-xmnvg\") pod \"keystone-cron-29492041-82dpf\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.734075 4900 generic.go:334] "Generic (PLEG): container finished" podID="e08773f7-5eaf-4a76-b671-0681c02a3471" containerID="34a9594c6ae90d00461e21b81bedad93da2bc761e8d58e4762f7d4aa27511d4a" exitCode=0 Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.734302 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" event={"ID":"e08773f7-5eaf-4a76-b671-0681c02a3471","Type":"ContainerDied","Data":"34a9594c6ae90d00461e21b81bedad93da2bc761e8d58e4762f7d4aa27511d4a"} Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.735403 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.764972 4900 generic.go:334] "Generic (PLEG): container finished" podID="899811c4-fce0-42df-b3e7-9b1495cad676" containerID="f6f5f302e7703bf610746eb0aefa198f328bdd65009e6ebf2fb182c19e425e5f" exitCode=0 Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.765126 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" event={"ID":"899811c4-fce0-42df-b3e7-9b1495cad676","Type":"ContainerDied","Data":"f6f5f302e7703bf610746eb0aefa198f328bdd65009e6ebf2fb182c19e425e5f"} Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.855105 4900 patch_prober.go:28] interesting pod/router-default-5444994796-zvswh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.855200 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zvswh" podUID="0703acf1-af71-4249-b1ef-e19c6beb4d86" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 27 14:01:15 crc kubenswrapper[4900]: I0127 14:01:15.977680 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-jnwth" Jan 27 14:01:16 crc kubenswrapper[4900]: I0127 14:01:16.123199 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-hn2jw" Jan 27 14:01:16 crc kubenswrapper[4900]: I0127 14:01:16.196018 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openstack-operators/openstack-operator-index-xvszg" podUID="147d12f6-3180-41d8-92c9-55aab763d313" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:16 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:16 crc kubenswrapper[4900]: > Jan 27 14:01:16 crc kubenswrapper[4900]: E0127 14:01:16.477707 4900 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeca1d592_3310_47ed_a815_8f32bc974d9b.slice/crio-dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402.scope\": RecentStats: unable to find data in memory cache]" Jan 27 14:01:16 crc kubenswrapper[4900]: I0127 14:01:16.619544 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-866c485c84-8p95x" Jan 27 14:01:16 crc kubenswrapper[4900]: I0127 14:01:16.887012 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 14:01:16 crc kubenswrapper[4900]: I0127 14:01:16.890458 4900 generic.go:334] "Generic (PLEG): container finished" podID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerID="dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402" exitCode=0 Jan 27 14:01:16 crc kubenswrapper[4900]: I0127 14:01:16.890535 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"eca1d592-3310-47ed-a815-8f32bc974d9b","Type":"ContainerDied","Data":"dab18b2a809157ae109d5e39c95348af980e050d00841ca94df479d1c2b86402"} Jan 27 14:01:16 crc kubenswrapper[4900]: I0127 14:01:16.909624 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-zvswh" Jan 27 14:01:16 crc kubenswrapper[4900]: I0127 14:01:16.955260 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" containerID="cri-o://99b7678c82d77089ad530dce58e1c26fc00601dda0d1d842420d2dfbee4caa76" gracePeriod=14 Jan 27 14:01:17 crc kubenswrapper[4900]: I0127 14:01:17.682869 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:17 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:17 crc kubenswrapper[4900]: > Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.931754 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-844499dc88-f72ld" Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.947770 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" event={"ID":"e08773f7-5eaf-4a76-b671-0681c02a3471","Type":"ContainerStarted","Data":"cf81124588ad286145b9c4fb33db7397bf09c9ed33627b50c79717cb7715e175"} Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.950472 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l" Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.962691 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" event={"ID":"899811c4-fce0-42df-b3e7-9b1495cad676","Type":"ContainerStarted","Data":"34d9ab8e900aa702ca5f33a8a2fc830dc580978e49694fdeeb3df6324ccd0f91"} Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.963609 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5" Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.978527 4900 generic.go:334] "Generic (PLEG): container finished" podID="88330a90-8030-489a-898c-2690958a1a8e" containerID="99b7678c82d77089ad530dce58e1c26fc00601dda0d1d842420d2dfbee4caa76" exitCode=0 Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.978719 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" event={"ID":"88330a90-8030-489a-898c-2690958a1a8e","Type":"ContainerDied","Data":"99b7678c82d77089ad530dce58e1c26fc00601dda0d1d842420d2dfbee4caa76"} Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.987344 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": dial tcp 10.217.0.68:6443: connect: connection refused" start-of-body= Jan 27 14:01:18 crc kubenswrapper[4900]: I0127 14:01:18.987414 4900 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": dial tcp 10.217.0.68:6443: connect: connection refused" Jan 27 14:01:19 crc kubenswrapper[4900]: I0127 14:01:18.999182 4900 generic.go:334] "Generic (PLEG): container finished" podID="7626ad91-9f29-4dae-969a-e23d420319ac" containerID="6fdd71884020c539bd9777c35cbe22887618ed748b95b5f0a35e3375e433dbda" exitCode=0 Jan 27 14:01:19 crc kubenswrapper[4900]: I0127 14:01:18.999389 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7626ad91-9f29-4dae-969a-e23d420319ac","Type":"ContainerDied","Data":"6fdd71884020c539bd9777c35cbe22887618ed748b95b5f0a35e3375e433dbda"} Jan 27 14:01:19 crc kubenswrapper[4900]: I0127 14:01:19.023440 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-chtfl_ac68471d-4581-477c-bcff-415b8f8ea21e/registry-server/2.log" Jan 27 14:01:19 crc kubenswrapper[4900]: I0127 14:01:19.046144 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerStarted","Data":"e5933322d57c6df5b7d7d617cd8603776fe3cb38cbae5d8f137001d3a9c7ca44"} Jan 27 14:01:19 crc kubenswrapper[4900]: I0127 14:01:19.120880 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"eca1d592-3310-47ed-a815-8f32bc974d9b","Type":"ContainerStarted","Data":"8eba5b638895b6eab68daac99d0ae3b7f26f843fb6180d7f63e3ab3f16317e95"} Jan 27 14:01:20 crc kubenswrapper[4900]: I0127 14:01:20.236651 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7626ad91-9f29-4dae-969a-e23d420319ac","Type":"ContainerStarted","Data":"9834e5f8c47113c53c6968fba0d53e24e50c56b40d9ecc51b9017e3126f42d32"} Jan 27 14:01:20 crc kubenswrapper[4900]: I0127 14:01:20.830164 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29492041-82dpf"] Jan 27 14:01:21 crc kubenswrapper[4900]: I0127 14:01:21.366422 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492041-82dpf" event={"ID":"d10a0144-94bd-4c3d-bca4-fe13a7ed6967","Type":"ContainerStarted","Data":"ce8f47a31268d0fd920db90d78c9fb371abb55296b9c28806e68998cc251f040"} Jan 27 14:01:21 crc kubenswrapper[4900]: I0127 14:01:21.382668 4900 generic.go:334] "Generic (PLEG): container finished" podID="6bf50105-b28f-4123-a6f8-75124e213fcc" containerID="af3c570545cf826144035311b08eba9b878bb342fee4e73f72daffd5d60b8907" exitCode=0 Jan 27 14:01:21 crc kubenswrapper[4900]: I0127 14:01:21.382997 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6bf50105-b28f-4123-a6f8-75124e213fcc","Type":"ContainerDied","Data":"af3c570545cf826144035311b08eba9b878bb342fee4e73f72daffd5d60b8907"} Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.134039 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.194240 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-xvszg" Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.253863 4900 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:22 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:22 crc kubenswrapper[4900]: > Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.253975 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.255541 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973"} pod="openshift-marketplace/redhat-marketplace-kbwqx" containerMessage="Container registry-server failed startup probe, will be restarted" Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.255591 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" containerID="cri-o://bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973" gracePeriod=30 Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.265895 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.265959 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.372836 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.372930 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.416149 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492041-82dpf" event={"ID":"d10a0144-94bd-4c3d-bca4-fe13a7ed6967","Type":"ContainerStarted","Data":"ffb24f2bd3030d2a6d0b9c0479c164e3168ab06ba751adc419c0756bff71b384"} Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.426571 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" event={"ID":"88330a90-8030-489a-898c-2690958a1a8e","Type":"ContainerStarted","Data":"acf20aaefca8e57946b548709de8e344f7094f96292ead042de21148d4fe77bb"} Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.427000 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.428086 4900 patch_prober.go:28] interesting pod/oauth-openshift-6d4d98fcc6-f4gd5 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.68:6443/healthz\": dial tcp 10.217.0.68:6443: connect: connection refused" start-of-body= 
Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.428191 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5" podUID="88330a90-8030-489a-898c-2690958a1a8e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.68:6443/healthz\": dial tcp 10.217.0.68:6443: connect: connection refused"
Jan 27 14:01:22 crc kubenswrapper[4900]: I0127 14:01:22.465584 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29492041-82dpf" podStartSLOduration=20.455025643 podStartE2EDuration="20.455025643s" podCreationTimestamp="2026-01-27 14:01:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 14:01:22.451881482 +0000 UTC m=+5709.688909692" watchObservedRunningTime="2026-01-27 14:01:22.455025643 +0000 UTC m=+5709.692053853"
Jan 27 14:01:23 crc kubenswrapper[4900]: I0127 14:01:23.313835 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7c8f46b9cc-4h24l"
Jan 27 14:01:23 crc kubenswrapper[4900]: I0127 14:01:23.722543 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Jan 27 14:01:23 crc kubenswrapper[4900]: I0127 14:01:23.722592 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Jan 27 14:01:24 crc kubenswrapper[4900]: I0127 14:01:24.005626 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6d4d98fcc6-f4gd5"
Jan 27 14:01:26 crc kubenswrapper[4900]: I0127 14:01:26.197995 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 27 14:01:26 crc kubenswrapper[4900]: I0127 14:01:26.554325 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Jan 27 14:01:26 crc kubenswrapper[4900]: I0127 14:01:26.606296 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 27 14:01:26 crc kubenswrapper[4900]: I0127 14:01:26.783681 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Jan 27 14:01:27 crc kubenswrapper[4900]: I0127 14:01:27.561800 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:01:27 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:01:27 crc kubenswrapper[4900]: >
Jan 27 14:01:28 crc kubenswrapper[4900]: I0127 14:01:28.694573 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6bf50105-b28f-4123-a6f8-75124e213fcc","Type":"ContainerStarted","Data":"c3e666595c78a3411d29c1c2dc0c61dc4b2be3b2dd5b11df3b0db097d219427f"}
Jan 27 14:01:28 crc kubenswrapper[4900]: I0127 14:01:28.716509 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-chtfl"
Jan 27 14:01:28 crc kubenswrapper[4900]: I0127 14:01:28.716580 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-chtfl"
Jan 27 14:01:29 crc kubenswrapper[4900]: I0127 14:01:29.710844 4900 generic.go:334] "Generic (PLEG): container finished" podID="ac88ca80-18bc-417b-8a7d-5ca2666524e3" containerID="e851972b44f9ab9caa5895feb925588891622524f8b9ed75f3defccb1240c4be" exitCode=1
Jan 27 14:01:29 crc kubenswrapper[4900]: I0127 14:01:29.710958 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"ac88ca80-18bc-417b-8a7d-5ca2666524e3","Type":"ContainerDied","Data":"e851972b44f9ab9caa5895feb925588891622524f8b9ed75f3defccb1240c4be"}
Jan 27 14:01:29 crc kubenswrapper[4900]: I0127 14:01:29.835418 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" probeResult="failure" output=<
Jan 27 14:01:29 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s
Jan 27 14:01:29 crc kubenswrapper[4900]: >
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.741170 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"ac88ca80-18bc-417b-8a7d-5ca2666524e3","Type":"ContainerDied","Data":"949db1039723ef32c0e17242a545386ea87041ce1b548624149b1e4ed2a9de0f"}
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.741386 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.742828 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="949db1039723ef32c0e17242a545386ea87041ce1b548624149b1e4ed2a9de0f"
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.749977 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.750072 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-workdir\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.750247 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config-secret\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.750430 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ca-certs\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.750841 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-config-data\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.753383 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.752575 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-config-data" (OuterVolumeSpecName: "config-data") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.753529 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbsds\" (UniqueName: \"kubernetes.io/projected/ac88ca80-18bc-417b-8a7d-5ca2666524e3-kube-api-access-jbsds\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.754148 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ssh-key\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.754201 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-temporary\") pod \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\" (UID: \"ac88ca80-18bc-417b-8a7d-5ca2666524e3\") "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.755824 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-config-data\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.758685 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.765140 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.773769 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "test-operator-logs") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.811902 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac88ca80-18bc-417b-8a7d-5ca2666524e3-kube-api-access-jbsds" (OuterVolumeSpecName: "kube-api-access-jbsds") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "kube-api-access-jbsds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.861601 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbsds\" (UniqueName: \"kubernetes.io/projected/ac88ca80-18bc-417b-8a7d-5ca2666524e3-kube-api-access-jbsds\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.861660 4900 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.861758 4900 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" "
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.861772 4900 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/ac88ca80-18bc-417b-8a7d-5ca2666524e3-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.900102 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.933373 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.964828 4900 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc"
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.968826 4900 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.968876 4900 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ssh-key\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.968897 4900 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:31 crc kubenswrapper[4900]: I0127 14:01:31.980326 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 27 14:01:32 crc kubenswrapper[4900]: I0127 14:01:32.029783 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "ac88ca80-18bc-417b-8a7d-5ca2666524e3" (UID: "ac88ca80-18bc-417b-8a7d-5ca2666524e3"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 27 14:01:32 crc kubenswrapper[4900]: I0127 14:01:32.072429 4900 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/ac88ca80-18bc-417b-8a7d-5ca2666524e3-ca-certs\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:32 crc kubenswrapper[4900]: I0127 14:01:32.072522 4900 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/ac88ca80-18bc-417b-8a7d-5ca2666524e3-openstack-config\") on node \"crc\" DevicePath \"\""
Jan 27 14:01:32 crc kubenswrapper[4900]: I0127 14:01:32.689519 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 27 14:01:32 crc kubenswrapper[4900]: I0127 14:01:32.760130 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 27 14:01:32 crc kubenswrapper[4900]: I0127 14:01:32.959529 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85462bq5"
Jan 27 14:01:35 crc kubenswrapper[4900]: I0127 14:01:35.824612 4900 generic.go:334] "Generic (PLEG): container finished" podID="d10a0144-94bd-4c3d-bca4-fe13a7ed6967" containerID="ffb24f2bd3030d2a6d0b9c0479c164e3168ab06ba751adc419c0756bff71b384" exitCode=0
Jan 27 14:01:35 crc kubenswrapper[4900]: I0127 14:01:35.824738 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492041-82dpf" event={"ID":"d10a0144-94bd-4c3d-bca4-fe13a7ed6967","Type":"ContainerDied","Data":"ffb24f2bd3030d2a6d0b9c0479c164e3168ab06ba751adc419c0756bff71b384"}
Jan 27 14:01:35 crc kubenswrapper[4900]: I0127 14:01:35.998186 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 27 14:01:35 crc kubenswrapper[4900]: E0127 14:01:35.999318 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac88ca80-18bc-417b-8a7d-5ca2666524e3" containerName="tempest-tests-tempest-tests-runner"
Jan 27 14:01:35 crc kubenswrapper[4900]: I0127 14:01:35.999349 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac88ca80-18bc-417b-8a7d-5ca2666524e3" containerName="tempest-tests-tempest-tests-runner"
Jan 27 14:01:35 crc kubenswrapper[4900]: I0127 14:01:35.999639 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac88ca80-18bc-417b-8a7d-5ca2666524e3" containerName="tempest-tests-tempest-tests-runner"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.000955 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.017331 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-x4bq2"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.021188 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.132700 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5bd0ef1b-2a03-4069-ab40-fa57d5720373\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.133121 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm9bn\" (UniqueName: \"kubernetes.io/projected/5bd0ef1b-2a03-4069-ab40-fa57d5720373-kube-api-access-nm9bn\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5bd0ef1b-2a03-4069-ab40-fa57d5720373\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.236122 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5bd0ef1b-2a03-4069-ab40-fa57d5720373\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.236438 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm9bn\" (UniqueName: \"kubernetes.io/projected/5bd0ef1b-2a03-4069-ab40-fa57d5720373-kube-api-access-nm9bn\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5bd0ef1b-2a03-4069-ab40-fa57d5720373\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.237660 4900 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5bd0ef1b-2a03-4069-ab40-fa57d5720373\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.269390 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm9bn\" (UniqueName: \"kubernetes.io/projected/5bd0ef1b-2a03-4069-ab40-fa57d5720373-kube-api-access-nm9bn\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5bd0ef1b-2a03-4069-ab40-fa57d5720373\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 27 14:01:36 crc kubenswrapper[4900]: I0127 14:01:36.290779 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5bd0ef1b-2a03-4069-ab40-fa57d5720373\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 27 14:01:36 crc
kubenswrapper[4900]: I0127 14:01:36.344450 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 27 14:01:37 crc kubenswrapper[4900]: I0127 14:01:37.183945 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 27 14:01:37 crc kubenswrapper[4900]: I0127 14:01:37.534251 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:37 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:37 crc kubenswrapper[4900]: > Jan 27 14:01:37 crc kubenswrapper[4900]: I0127 14:01:37.534861 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 14:01:37 crc kubenswrapper[4900]: I0127 14:01:37.535971 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"9991df40d410601ffe901c22904434f4a34bf385283369ac4358e2182f170478"} pod="openshift-marketplace/redhat-operators-bngd7" containerMessage="Container registry-server failed startup probe, will be restarted" Jan 27 14:01:37 crc kubenswrapper[4900]: I0127 14:01:37.536130 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" containerID="cri-o://9991df40d410601ffe901c22904434f4a34bf385283369ac4358e2182f170478" gracePeriod=30 Jan 27 14:01:37 crc kubenswrapper[4900]: I0127 14:01:37.736240 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 27 14:01:38 crc kubenswrapper[4900]: W0127 14:01:38.007543 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5bd0ef1b_2a03_4069_ab40_fa57d5720373.slice/crio-77164cf64b1d905ff11d5080b6bcb94ae24f5dddc50217c305c8f32d002430a2 WatchSource:0}: Error finding container 77164cf64b1d905ff11d5080b6bcb94ae24f5dddc50217c305c8f32d002430a2: Status 404 returned error can't find the container with id 77164cf64b1d905ff11d5080b6bcb94ae24f5dddc50217c305c8f32d002430a2 Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.173284 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.338645 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-fernet-keys\") pod \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.340770 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmnvg\" (UniqueName: \"kubernetes.io/projected/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-kube-api-access-xmnvg\") pod \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.341094 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-combined-ca-bundle\") pod \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.341884 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-config-data\") pod \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\" (UID: \"d10a0144-94bd-4c3d-bca4-fe13a7ed6967\") " Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.357508 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d10a0144-94bd-4c3d-bca4-fe13a7ed6967" (UID: "d10a0144-94bd-4c3d-bca4-fe13a7ed6967"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.358428 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-kube-api-access-xmnvg" (OuterVolumeSpecName: "kube-api-access-xmnvg") pod "d10a0144-94bd-4c3d-bca4-fe13a7ed6967" (UID: "d10a0144-94bd-4c3d-bca4-fe13a7ed6967"). InnerVolumeSpecName "kube-api-access-xmnvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.447232 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d10a0144-94bd-4c3d-bca4-fe13a7ed6967" (UID: "d10a0144-94bd-4c3d-bca4-fe13a7ed6967"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.450258 4900 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.450321 4900 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.450332 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmnvg\" (UniqueName: \"kubernetes.io/projected/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-kube-api-access-xmnvg\") on node \"crc\" DevicePath \"\"" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.502142 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-config-data" (OuterVolumeSpecName: "config-data") pod "d10a0144-94bd-4c3d-bca4-fe13a7ed6967" (UID: "d10a0144-94bd-4c3d-bca4-fe13a7ed6967"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.553457 4900 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d10a0144-94bd-4c3d-bca4-fe13a7ed6967-config-data\") on node \"crc\" DevicePath \"\"" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.788914 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-chtfl" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.863500 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-chtfl" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.904016 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29492041-82dpf" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.904014 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29492041-82dpf" event={"ID":"d10a0144-94bd-4c3d-bca4-fe13a7ed6967","Type":"ContainerDied","Data":"ce8f47a31268d0fd920db90d78c9fb371abb55296b9c28806e68998cc251f040"} Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.904241 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce8f47a31268d0fd920db90d78c9fb371abb55296b9c28806e68998cc251f040" Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.910736 4900 generic.go:334] "Generic (PLEG): container finished" podID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerID="bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973" exitCode=0 Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.910863 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerDied","Data":"bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973"} Jan 27 14:01:38 crc kubenswrapper[4900]: I0127 14:01:38.913358 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"5bd0ef1b-2a03-4069-ab40-fa57d5720373","Type":"ContainerStarted","Data":"77164cf64b1d905ff11d5080b6bcb94ae24f5dddc50217c305c8f32d002430a2"} Jan 27 14:01:39 crc kubenswrapper[4900]: I0127 14:01:39.052802 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-chtfl"] Jan 27 14:01:39 crc kubenswrapper[4900]: I0127 14:01:39.932414 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerStarted","Data":"01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a"} Jan 27 14:01:39 crc kubenswrapper[4900]: I0127 14:01:39.933972 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-chtfl" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" containerID="cri-o://e5933322d57c6df5b7d7d617cd8603776fe3cb38cbae5d8f137001d3a9c7ca44" gracePeriod=2 Jan 27 14:01:40 crc kubenswrapper[4900]: I0127 14:01:40.955032 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-chtfl_ac68471d-4581-477c-bcff-415b8f8ea21e/registry-server/2.log" Jan 27 14:01:40 crc kubenswrapper[4900]: I0127 14:01:40.958450 4900 generic.go:334] "Generic (PLEG): container finished" podID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerID="e5933322d57c6df5b7d7d617cd8603776fe3cb38cbae5d8f137001d3a9c7ca44" exitCode=0 Jan 27 14:01:40 crc kubenswrapper[4900]: I0127 14:01:40.958679 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerDied","Data":"e5933322d57c6df5b7d7d617cd8603776fe3cb38cbae5d8f137001d3a9c7ca44"} Jan 27 14:01:40 crc kubenswrapper[4900]: I0127 14:01:40.958815 4900 scope.go:117] "RemoveContainer" containerID="edf34d1f731a0ed88ee92135cb225664c8ae3dfdfe5de507d718d31c06dfc817" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.052283 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.052360 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.686276 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-chtfl" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.874016 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cd5pz\" (UniqueName: \"kubernetes.io/projected/ac68471d-4581-477c-bcff-415b8f8ea21e-kube-api-access-cd5pz\") pod \"ac68471d-4581-477c-bcff-415b8f8ea21e\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.874352 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-utilities\") pod \"ac68471d-4581-477c-bcff-415b8f8ea21e\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.874587 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-catalog-content\") pod \"ac68471d-4581-477c-bcff-415b8f8ea21e\" (UID: \"ac68471d-4581-477c-bcff-415b8f8ea21e\") " Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.875028 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-utilities" (OuterVolumeSpecName: "utilities") pod "ac68471d-4581-477c-bcff-415b8f8ea21e" (UID: "ac68471d-4581-477c-bcff-415b8f8ea21e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.876198 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.925221 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac68471d-4581-477c-bcff-415b8f8ea21e-kube-api-access-cd5pz" (OuterVolumeSpecName: "kube-api-access-cd5pz") pod "ac68471d-4581-477c-bcff-415b8f8ea21e" (UID: "ac68471d-4581-477c-bcff-415b8f8ea21e"). InnerVolumeSpecName "kube-api-access-cd5pz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.965924 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ac68471d-4581-477c-bcff-415b8f8ea21e" (UID: "ac68471d-4581-477c-bcff-415b8f8ea21e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.980830 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cd5pz\" (UniqueName: \"kubernetes.io/projected/ac68471d-4581-477c-bcff-415b8f8ea21e-kube-api-access-cd5pz\") on node \"crc\" DevicePath \"\"" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.981270 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac68471d-4581-477c-bcff-415b8f8ea21e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 14:01:41 crc kubenswrapper[4900]: I0127 14:01:41.987390 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"5bd0ef1b-2a03-4069-ab40-fa57d5720373","Type":"ContainerStarted","Data":"4afe902cbdfdef1c54b74b51b0acbf43cf68567f934d1a924381070db42e32e9"} Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.000447 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-chtfl" Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.002634 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-chtfl" event={"ID":"ac68471d-4581-477c-bcff-415b8f8ea21e","Type":"ContainerDied","Data":"29fc97149564c3a61fc7c96326999df854b121850e0cc1566b35b236b71c83b1"} Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.002897 4900 scope.go:117] "RemoveContainer" containerID="e5933322d57c6df5b7d7d617cd8603776fe3cb38cbae5d8f137001d3a9c7ca44" Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.032807 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=4.13404145 podStartE2EDuration="7.032767274s" podCreationTimestamp="2026-01-27 14:01:35 +0000 UTC" firstStartedPulling="2026-01-27 14:01:38.018920455 +0000 UTC m=+5725.255948665" lastFinishedPulling="2026-01-27 14:01:40.917646279 +0000 UTC m=+5728.154674489" observedRunningTime="2026-01-27 14:01:42.005513876 +0000 UTC m=+5729.242542096" watchObservedRunningTime="2026-01-27 14:01:42.032767274 +0000 UTC m=+5729.269795484" Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.059516 4900 scope.go:117] "RemoveContainer" containerID="8b78ba1587e8b754259b96a88a9d4221731423045a78c55d9c96c1223bbfefde" Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.072173 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-chtfl"] Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.089243 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-chtfl"] Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.124541 4900 scope.go:117] "RemoveContainer" containerID="b5dc958ed45ebf020fa83bfe0b2439409af357c1c5f7d4fdcccc3d65c2bf717e" Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.142947 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:42 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:42 crc kubenswrapper[4900]: > Jan 27 14:01:42 crc kubenswrapper[4900]: I0127 14:01:42.501204 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" path="/var/lib/kubelet/pods/ac68471d-4581-477c-bcff-415b8f8ea21e/volumes" Jan 27 14:01:52 crc kubenswrapper[4900]: I0127 14:01:52.164461 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" probeResult="failure" output=< Jan 27 14:01:52 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:01:52 crc kubenswrapper[4900]: > Jan 27 14:01:52 crc kubenswrapper[4900]: I0127 14:01:52.372843 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 14:01:52 crc kubenswrapper[4900]: I0127 14:01:52.373314 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:01:56 crc kubenswrapper[4900]: I0127 14:01:56.134051 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 27 14:02:01 crc kubenswrapper[4900]: I0127 14:02:01.170646 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 14:02:01 crc kubenswrapper[4900]: I0127 14:02:01.239710 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 14:02:01 crc kubenswrapper[4900]: I0127 14:02:01.429667 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kbwqx"] Jan 27 14:02:02 crc kubenswrapper[4900]: I0127 14:02:02.277184 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kbwqx" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" containerID="cri-o://01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a" gracePeriod=2 Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.029280 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.211017 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4r44g\" (UniqueName: \"kubernetes.io/projected/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-kube-api-access-4r44g\") pod \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.211408 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-catalog-content\") pod \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.211637 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-utilities\") pod \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\" (UID: \"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b\") " Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.213428 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-utilities" (OuterVolumeSpecName: "utilities") pod "d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" (UID: "d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.229268 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-kube-api-access-4r44g" (OuterVolumeSpecName: "kube-api-access-4r44g") pod "d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" (UID: "d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b"). InnerVolumeSpecName "kube-api-access-4r44g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.249916 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" (UID: "d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.316243 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4r44g\" (UniqueName: \"kubernetes.io/projected/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-kube-api-access-4r44g\") on node \"crc\" DevicePath \"\"" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.316294 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.316307 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.316706 4900 generic.go:334] "Generic (PLEG): container finished" podID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerID="01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a" exitCode=0 Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.316773 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerDied","Data":"01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a"} Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.316818 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kbwqx" event={"ID":"d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b","Type":"ContainerDied","Data":"a17dfa61f23eaf620eefca77de11098deb7ce474be9fb6f8c572e38cd07e5a69"} Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.316843 4900 scope.go:117] "RemoveContainer" containerID="01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.317105 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kbwqx" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.377261 4900 scope.go:117] "RemoveContainer" containerID="bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.385178 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kbwqx"] Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.422271 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kbwqx"] Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.423624 4900 scope.go:117] "RemoveContainer" containerID="f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.471715 4900 scope.go:117] "RemoveContainer" containerID="980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.548620 4900 scope.go:117] "RemoveContainer" containerID="01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a" Jan 27 14:02:03 crc kubenswrapper[4900]: E0127 14:02:03.549908 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a\": container with ID starting with 01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a not found: ID does not exist" containerID="01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.549964 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a"} err="failed to get container status \"01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a\": rpc error: code = NotFound desc = could not find container \"01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a\": container with ID starting with 01c47b49e0ed76628ec4169f2f151962bf6feef30afff4878126dddb9e40543a not found: ID does not exist" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.549990 4900 scope.go:117] "RemoveContainer" containerID="bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973" Jan 27 14:02:03 crc kubenswrapper[4900]: E0127 14:02:03.552011 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973\": container with ID starting with bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973 not found: ID does not exist" containerID="bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.552114 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973"} err="failed to get container status \"bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973\": rpc error: code = NotFound desc = could not find container \"bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973\": container with ID starting with bdfa4c5a925247d3bcbc0deeedb7b5b18af80549c024d73a300e838001662973 not found: ID does not exist" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.552163 4900 scope.go:117] "RemoveContainer" 
containerID="f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b" Jan 27 14:02:03 crc kubenswrapper[4900]: E0127 14:02:03.554749 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b\": container with ID starting with f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b not found: ID does not exist" containerID="f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.555461 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b"} err="failed to get container status \"f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b\": rpc error: code = NotFound desc = could not find container \"f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b\": container with ID starting with f053fb8b27e53d0e0c0d100265588a84e1d71998ed312c1ee3db4fe3dc3b366b not found: ID does not exist" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.555491 4900 scope.go:117] "RemoveContainer" containerID="980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2" Jan 27 14:02:03 crc kubenswrapper[4900]: E0127 14:02:03.557768 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2\": container with ID starting with 980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2 not found: ID does not exist" containerID="980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2" Jan 27 14:02:03 crc kubenswrapper[4900]: I0127 14:02:03.557820 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2"} err="failed to get container status \"980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2\": rpc error: code = NotFound desc = could not find container \"980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2\": container with ID starting with 980113358fa2892fe8c60e2e22f411887d67801372a67fdfc4b3d9dcdc99a5d2 not found: ID does not exist" Jan 27 14:02:04 crc kubenswrapper[4900]: I0127 14:02:04.509905 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" path="/var/lib/kubelet/pods/d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b/volumes" Jan 27 14:02:08 crc kubenswrapper[4900]: I0127 14:02:08.411719 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/3.log" Jan 27 14:02:08 crc kubenswrapper[4900]: I0127 14:02:08.413952 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/2.log" Jan 27 14:02:08 crc kubenswrapper[4900]: I0127 14:02:08.415383 4900 generic.go:334] "Generic (PLEG): container finished" podID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerID="9991df40d410601ffe901c22904434f4a34bf385283369ac4358e2182f170478" exitCode=137 Jan 27 14:02:08 crc kubenswrapper[4900]: I0127 14:02:08.415445 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" 
event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerDied","Data":"9991df40d410601ffe901c22904434f4a34bf385283369ac4358e2182f170478"} Jan 27 14:02:08 crc kubenswrapper[4900]: I0127 14:02:08.415510 4900 scope.go:117] "RemoveContainer" containerID="254bacb8cdc0181ab32eae1e52194721e7359505226c3523bf5d9e3315e99d48" Jan 27 14:02:09 crc kubenswrapper[4900]: I0127 14:02:09.439615 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/3.log" Jan 27 14:02:10 crc kubenswrapper[4900]: I0127 14:02:10.513506 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/3.log" Jan 27 14:02:10 crc kubenswrapper[4900]: I0127 14:02:10.591584 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerStarted","Data":"4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798"} Jan 27 14:02:16 crc kubenswrapper[4900]: I0127 14:02:16.457104 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 14:02:16 crc kubenswrapper[4900]: I0127 14:02:16.457897 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 14:02:17 crc kubenswrapper[4900]: I0127 14:02:17.522161 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:02:17 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:02:17 crc kubenswrapper[4900]: > Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.373273 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.374215 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.374309 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.376526 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.376618 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" 
containerName="machine-config-daemon" containerID="cri-o://8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" gracePeriod=600 Jan 27 14:02:22 crc kubenswrapper[4900]: E0127 14:02:22.504783 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.745585 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" exitCode=0 Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.745859 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245"} Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.746159 4900 scope.go:117] "RemoveContainer" containerID="a204cecd5425db0d7771e4d87ef37003fa244ebdee6545fd60e5e45f7ecd8382" Jan 27 14:02:22 crc kubenswrapper[4900]: I0127 14:02:22.750099 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:02:22 crc kubenswrapper[4900]: E0127 14:02:22.751280 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:02:28 crc kubenswrapper[4900]: I0127 14:02:28.144082 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:02:28 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:02:28 crc kubenswrapper[4900]: > Jan 27 14:02:34 crc kubenswrapper[4900]: I0127 14:02:34.482993 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:02:34 crc kubenswrapper[4900]: E0127 14:02:34.484417 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.805662 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-47j5g/must-gather-whhtm"] Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807723 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 
crc kubenswrapper[4900]: I0127 14:02:36.807751 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807773 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.807781 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807822 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="extract-utilities" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.807833 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="extract-utilities" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807847 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="extract-content" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.807854 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="extract-content" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807879 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="extract-content" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.807885 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="extract-content" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807902 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.807909 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807937 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.807943 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807956 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d10a0144-94bd-4c3d-bca4-fe13a7ed6967" containerName="keystone-cron" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.807962 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d10a0144-94bd-4c3d-bca4-fe13a7ed6967" containerName="keystone-cron" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.807977 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="extract-utilities" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.807983 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="extract-utilities" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.808004 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" 
containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.808010 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.809849 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.809887 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.809905 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.809925 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d10a0144-94bd-4c3d-bca4-fe13a7ed6967" containerName="keystone-cron" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.809956 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.809967 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0df6989-f8c7-4cdd-a1f1-1cb78a9d046b" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: E0127 14:02:36.812115 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.812135 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.812581 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac68471d-4581-477c-bcff-415b8f8ea21e" containerName="registry-server" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.902337 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.909209 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-47j5g/must-gather-whhtm"] Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.918248 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-47j5g"/"kube-root-ca.crt" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.920910 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-47j5g"/"default-dockercfg-q84dv" Jan 27 14:02:36 crc kubenswrapper[4900]: I0127 14:02:36.925926 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-47j5g"/"openshift-service-ca.crt" Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.026047 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdxsw\" (UniqueName: \"kubernetes.io/projected/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-kube-api-access-jdxsw\") pod \"must-gather-whhtm\" (UID: \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\") " pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.026553 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-must-gather-output\") pod \"must-gather-whhtm\" (UID: \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\") " pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.130450 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdxsw\" (UniqueName: \"kubernetes.io/projected/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-kube-api-access-jdxsw\") pod \"must-gather-whhtm\" (UID: \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\") " pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.130725 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-must-gather-output\") pod \"must-gather-whhtm\" (UID: \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\") " pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.132125 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-must-gather-output\") pod \"must-gather-whhtm\" (UID: \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\") " pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.165663 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdxsw\" (UniqueName: \"kubernetes.io/projected/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-kube-api-access-jdxsw\") pod \"must-gather-whhtm\" (UID: \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\") " pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.257038 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.554535 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:02:37 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:02:37 crc kubenswrapper[4900]: > Jan 27 14:02:37 crc kubenswrapper[4900]: I0127 14:02:37.983035 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-47j5g/must-gather-whhtm"] Jan 27 14:02:38 crc kubenswrapper[4900]: I0127 14:02:38.048611 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/must-gather-whhtm" event={"ID":"ab9d673e-a635-497a-9322-cdbd3f9fd3b3","Type":"ContainerStarted","Data":"7a8dee2f296aaf9009b6e0ab1c547436942b327da2c12fe5f4acd58f26b56dea"} Jan 27 14:02:45 crc kubenswrapper[4900]: I0127 14:02:45.490869 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:02:45 crc kubenswrapper[4900]: E0127 14:02:45.493882 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:02:47 crc kubenswrapper[4900]: I0127 14:02:47.514879 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:02:47 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:02:47 crc kubenswrapper[4900]: > Jan 27 14:02:49 crc kubenswrapper[4900]: I0127 14:02:49.375356 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/must-gather-whhtm" event={"ID":"ab9d673e-a635-497a-9322-cdbd3f9fd3b3","Type":"ContainerStarted","Data":"8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c"} Jan 27 14:02:49 crc kubenswrapper[4900]: I0127 14:02:49.375942 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/must-gather-whhtm" event={"ID":"ab9d673e-a635-497a-9322-cdbd3f9fd3b3","Type":"ContainerStarted","Data":"1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b"} Jan 27 14:02:49 crc kubenswrapper[4900]: I0127 14:02:49.456681 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-47j5g/must-gather-whhtm" podStartSLOduration=2.967133343 podStartE2EDuration="13.456650749s" podCreationTimestamp="2026-01-27 14:02:36 +0000 UTC" firstStartedPulling="2026-01-27 14:02:37.977738373 +0000 UTC m=+5785.214766583" lastFinishedPulling="2026-01-27 14:02:48.467255779 +0000 UTC m=+5795.704283989" observedRunningTime="2026-01-27 14:02:49.431524883 +0000 UTC m=+5796.668553093" watchObservedRunningTime="2026-01-27 14:02:49.456650749 +0000 UTC m=+5796.693678959" Jan 27 14:02:57 crc kubenswrapper[4900]: I0127 14:02:57.522331 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bngd7" 
podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" probeResult="failure" output=< Jan 27 14:02:57 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:02:57 crc kubenswrapper[4900]: > Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.168466 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-47j5g/crc-debug-6gsxx"] Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.173129 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.323243 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f4ff\" (UniqueName: \"kubernetes.io/projected/733cb890-0c0d-4b6e-a225-4b98a5cc604c-kube-api-access-7f4ff\") pod \"crc-debug-6gsxx\" (UID: \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\") " pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.323454 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/733cb890-0c0d-4b6e-a225-4b98a5cc604c-host\") pod \"crc-debug-6gsxx\" (UID: \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\") " pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.426750 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/733cb890-0c0d-4b6e-a225-4b98a5cc604c-host\") pod \"crc-debug-6gsxx\" (UID: \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\") " pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.427395 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/733cb890-0c0d-4b6e-a225-4b98a5cc604c-host\") pod \"crc-debug-6gsxx\" (UID: \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\") " pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.429802 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f4ff\" (UniqueName: \"kubernetes.io/projected/733cb890-0c0d-4b6e-a225-4b98a5cc604c-kube-api-access-7f4ff\") pod \"crc-debug-6gsxx\" (UID: \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\") " pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.484428 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:03:00 crc kubenswrapper[4900]: E0127 14:03:00.485031 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.504901 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f4ff\" (UniqueName: \"kubernetes.io/projected/733cb890-0c0d-4b6e-a225-4b98a5cc604c-kube-api-access-7f4ff\") pod \"crc-debug-6gsxx\" (UID: \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\") " 
pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:03:00 crc kubenswrapper[4900]: I0127 14:03:00.798707 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:03:00 crc kubenswrapper[4900]: W0127 14:03:00.848590 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod733cb890_0c0d_4b6e_a225_4b98a5cc604c.slice/crio-089e0250a9cc86b1b7a10329a00ca158905b4b6e4d2901b1c13031a3ccce7657 WatchSource:0}: Error finding container 089e0250a9cc86b1b7a10329a00ca158905b4b6e4d2901b1c13031a3ccce7657: Status 404 returned error can't find the container with id 089e0250a9cc86b1b7a10329a00ca158905b4b6e4d2901b1c13031a3ccce7657 Jan 27 14:03:01 crc kubenswrapper[4900]: I0127 14:03:01.642443 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/crc-debug-6gsxx" event={"ID":"733cb890-0c0d-4b6e-a225-4b98a5cc604c","Type":"ContainerStarted","Data":"089e0250a9cc86b1b7a10329a00ca158905b4b6e4d2901b1c13031a3ccce7657"} Jan 27 14:03:06 crc kubenswrapper[4900]: I0127 14:03:06.633946 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 14:03:06 crc kubenswrapper[4900]: I0127 14:03:06.726755 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 14:03:06 crc kubenswrapper[4900]: I0127 14:03:06.902938 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bngd7"] Jan 27 14:03:07 crc kubenswrapper[4900]: I0127 14:03:07.756767 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" containerID="cri-o://4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798" gracePeriod=2 Jan 27 14:03:08 crc kubenswrapper[4900]: I0127 14:03:08.825932 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bngd7_80fbf804-7171-46c3-b2dc-8a01b3bddb9c/registry-server/3.log" Jan 27 14:03:08 crc kubenswrapper[4900]: I0127 14:03:08.828752 4900 generic.go:334] "Generic (PLEG): container finished" podID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerID="4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798" exitCode=0 Jan 27 14:03:08 crc kubenswrapper[4900]: I0127 14:03:08.828836 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerDied","Data":"4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798"} Jan 27 14:03:08 crc kubenswrapper[4900]: I0127 14:03:08.828933 4900 scope.go:117] "RemoveContainer" containerID="9991df40d410601ffe901c22904434f4a34bf385283369ac4358e2182f170478" Jan 27 14:03:13 crc kubenswrapper[4900]: I0127 14:03:13.484499 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:03:13 crc kubenswrapper[4900]: E0127 14:03:13.486015 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:03:16 crc kubenswrapper[4900]: E0127 14:03:16.457434 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798 is running failed: container process not found" containerID="4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 14:03:16 crc kubenswrapper[4900]: E0127 14:03:16.458810 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798 is running failed: container process not found" containerID="4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 14:03:16 crc kubenswrapper[4900]: E0127 14:03:16.459752 4900 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798 is running failed: container process not found" containerID="4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798" cmd=["grpc_health_probe","-addr=:50051"] Jan 27 14:03:16 crc kubenswrapper[4900]: E0127 14:03:16.459794 4900 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-bngd7" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.272525 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.445565 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxwf4\" (UniqueName: \"kubernetes.io/projected/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-kube-api-access-xxwf4\") pod \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.446155 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-catalog-content\") pod \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.446297 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-utilities\") pod \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\" (UID: \"80fbf804-7171-46c3-b2dc-8a01b3bddb9c\") " Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.466541 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-utilities" (OuterVolumeSpecName: "utilities") pod "80fbf804-7171-46c3-b2dc-8a01b3bddb9c" (UID: "80fbf804-7171-46c3-b2dc-8a01b3bddb9c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.551211 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.572336 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80fbf804-7171-46c3-b2dc-8a01b3bddb9c" (UID: "80fbf804-7171-46c3-b2dc-8a01b3bddb9c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.655572 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.999285 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bngd7" event={"ID":"80fbf804-7171-46c3-b2dc-8a01b3bddb9c","Type":"ContainerDied","Data":"99695c4734d150f4b9b43128d23095a70ddb29428e6e542405b90992d08d5386"} Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.999354 4900 scope.go:117] "RemoveContainer" containerID="4747daeee1b8f8ba99b6a5c7528e9ea7625e0429432949c07bceb0f5011f7798" Jan 27 14:03:17 crc kubenswrapper[4900]: I0127 14:03:17.999377 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bngd7" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.036594 4900 scope.go:117] "RemoveContainer" containerID="cb571937f332df49c21e8733c046378c8a22fc86e36efb696ce13d1aa746660a" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.425190 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-kube-api-access-xxwf4" (OuterVolumeSpecName: "kube-api-access-xxwf4") pod "80fbf804-7171-46c3-b2dc-8a01b3bddb9c" (UID: "80fbf804-7171-46c3-b2dc-8a01b3bddb9c"). InnerVolumeSpecName "kube-api-access-xxwf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.469249 4900 scope.go:117] "RemoveContainer" containerID="a35c26b5c90bfcd044062ae5637eedba55e1f06b256e26859d5f1b397fda0076" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.480422 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxwf4\" (UniqueName: \"kubernetes.io/projected/80fbf804-7171-46c3-b2dc-8a01b3bddb9c-kube-api-access-xxwf4\") on node \"crc\" DevicePath \"\"" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.778228 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bngd7"] Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.805703 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bngd7"] Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.848505 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qvq6b"] Jan 27 14:03:18 crc kubenswrapper[4900]: E0127 14:03:18.850151 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="extract-utilities" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850186 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="extract-utilities" Jan 27 14:03:18 crc kubenswrapper[4900]: E0127 14:03:18.850213 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="extract-content" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850225 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="extract-content" Jan 27 14:03:18 crc kubenswrapper[4900]: E0127 14:03:18.850262 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850271 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: E0127 14:03:18.850309 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850318 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: E0127 14:03:18.850360 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850372 4900 
state_mem.go:107] "Deleted CPUSet assignment" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850802 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850820 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850844 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.850888 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: E0127 14:03:18.851324 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.851340 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: E0127 14:03:18.851369 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.851379 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.851745 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" containerName="registry-server" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.854279 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:18 crc kubenswrapper[4900]: I0127 14:03:18.865345 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qvq6b"] Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.020138 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-catalog-content\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.020248 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szdk8\" (UniqueName: \"kubernetes.io/projected/692a81bb-019a-40bd-8d90-ffd7adb09ca8-kube-api-access-szdk8\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.020648 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-utilities\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.046454 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/crc-debug-6gsxx" event={"ID":"733cb890-0c0d-4b6e-a225-4b98a5cc604c","Type":"ContainerStarted","Data":"dd3e2d59d222fd621bfdb6c938702d3f4744951215b85ad3b6cf39dca46b2e62"} Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.087380 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-47j5g/crc-debug-6gsxx" podStartSLOduration=2.719907892 podStartE2EDuration="19.087353262s" podCreationTimestamp="2026-01-27 14:03:00 +0000 UTC" firstStartedPulling="2026-01-27 14:03:00.852634796 +0000 UTC m=+5808.089662996" lastFinishedPulling="2026-01-27 14:03:17.220080156 +0000 UTC m=+5824.457108366" observedRunningTime="2026-01-27 14:03:19.083265213 +0000 UTC m=+5826.320293443" watchObservedRunningTime="2026-01-27 14:03:19.087353262 +0000 UTC m=+5826.324381472" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.123990 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-utilities\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.124554 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-catalog-content\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.124616 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szdk8\" (UniqueName: \"kubernetes.io/projected/692a81bb-019a-40bd-8d90-ffd7adb09ca8-kube-api-access-szdk8\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " 
pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.124839 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-utilities\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.125324 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-catalog-content\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.166924 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szdk8\" (UniqueName: \"kubernetes.io/projected/692a81bb-019a-40bd-8d90-ffd7adb09ca8-kube-api-access-szdk8\") pod \"redhat-operators-qvq6b\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.190441 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:19 crc kubenswrapper[4900]: I0127 14:03:19.926557 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qvq6b"] Jan 27 14:03:19 crc kubenswrapper[4900]: W0127 14:03:19.938689 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod692a81bb_019a_40bd_8d90_ffd7adb09ca8.slice/crio-50e4633e1837285901557b4e34795fc929aa954f4cba75b6c3f50f38dd76c37b WatchSource:0}: Error finding container 50e4633e1837285901557b4e34795fc929aa954f4cba75b6c3f50f38dd76c37b: Status 404 returned error can't find the container with id 50e4633e1837285901557b4e34795fc929aa954f4cba75b6c3f50f38dd76c37b Jan 27 14:03:20 crc kubenswrapper[4900]: I0127 14:03:20.063020 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qvq6b" event={"ID":"692a81bb-019a-40bd-8d90-ffd7adb09ca8","Type":"ContainerStarted","Data":"50e4633e1837285901557b4e34795fc929aa954f4cba75b6c3f50f38dd76c37b"} Jan 27 14:03:20 crc kubenswrapper[4900]: I0127 14:03:20.503125 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80fbf804-7171-46c3-b2dc-8a01b3bddb9c" path="/var/lib/kubelet/pods/80fbf804-7171-46c3-b2dc-8a01b3bddb9c/volumes" Jan 27 14:03:21 crc kubenswrapper[4900]: I0127 14:03:21.080359 4900 generic.go:334] "Generic (PLEG): container finished" podID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerID="9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6" exitCode=0 Jan 27 14:03:21 crc kubenswrapper[4900]: I0127 14:03:21.080514 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qvq6b" event={"ID":"692a81bb-019a-40bd-8d90-ffd7adb09ca8","Type":"ContainerDied","Data":"9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6"} Jan 27 14:03:23 crc kubenswrapper[4900]: I0127 14:03:23.118120 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qvq6b" 
event={"ID":"692a81bb-019a-40bd-8d90-ffd7adb09ca8","Type":"ContainerStarted","Data":"f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0"} Jan 27 14:03:24 crc kubenswrapper[4900]: I0127 14:03:24.482965 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:03:24 crc kubenswrapper[4900]: E0127 14:03:24.483919 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:03:25 crc kubenswrapper[4900]: I0127 14:03:25.151705 4900 generic.go:334] "Generic (PLEG): container finished" podID="61ef39b0-502c-45d5-be3a-e11c6ae19d59" containerID="99bf9c8acda5d1f77bd4f37089b3e7e853fbbc968d684f9f2b5e55f9baf527a1" exitCode=0 Jan 27 14:03:25 crc kubenswrapper[4900]: I0127 14:03:25.151859 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" event={"ID":"61ef39b0-502c-45d5-be3a-e11c6ae19d59","Type":"ContainerDied","Data":"99bf9c8acda5d1f77bd4f37089b3e7e853fbbc968d684f9f2b5e55f9baf527a1"} Jan 27 14:03:27 crc kubenswrapper[4900]: I0127 14:03:27.184540 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" event={"ID":"61ef39b0-502c-45d5-be3a-e11c6ae19d59","Type":"ContainerStarted","Data":"9d75384e8a8a412d38db7ffa51cd0c7e6d90ea0632181df3467d3894f4582b74"} Jan 27 14:03:35 crc kubenswrapper[4900]: I0127 14:03:35.304751 4900 generic.go:334] "Generic (PLEG): container finished" podID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerID="f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0" exitCode=0 Jan 27 14:03:35 crc kubenswrapper[4900]: I0127 14:03:35.304828 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qvq6b" event={"ID":"692a81bb-019a-40bd-8d90-ffd7adb09ca8","Type":"ContainerDied","Data":"f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0"} Jan 27 14:03:36 crc kubenswrapper[4900]: I0127 14:03:36.495906 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:03:36 crc kubenswrapper[4900]: E0127 14:03:36.496986 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:03:37 crc kubenswrapper[4900]: I0127 14:03:37.337643 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qvq6b" event={"ID":"692a81bb-019a-40bd-8d90-ffd7adb09ca8","Type":"ContainerStarted","Data":"0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d"} Jan 27 14:03:37 crc kubenswrapper[4900]: I0127 14:03:37.377899 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qvq6b" podStartSLOduration=4.164470153 podStartE2EDuration="19.377873238s" 
podCreationTimestamp="2026-01-27 14:03:18 +0000 UTC" firstStartedPulling="2026-01-27 14:03:21.085035019 +0000 UTC m=+5828.322063229" lastFinishedPulling="2026-01-27 14:03:36.298438104 +0000 UTC m=+5843.535466314" observedRunningTime="2026-01-27 14:03:37.367777406 +0000 UTC m=+5844.604805616" watchObservedRunningTime="2026-01-27 14:03:37.377873238 +0000 UTC m=+5844.614901448" Jan 27 14:03:39 crc kubenswrapper[4900]: I0127 14:03:39.191691 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:39 crc kubenswrapper[4900]: I0127 14:03:39.192571 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:03:40 crc kubenswrapper[4900]: I0127 14:03:40.253010 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qvq6b" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="registry-server" probeResult="failure" output=< Jan 27 14:03:40 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:03:40 crc kubenswrapper[4900]: > Jan 27 14:03:43 crc kubenswrapper[4900]: I0127 14:03:43.753427 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 14:03:43 crc kubenswrapper[4900]: I0127 14:03:43.754241 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 14:03:47 crc kubenswrapper[4900]: I0127 14:03:47.482997 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:03:47 crc kubenswrapper[4900]: E0127 14:03:47.484178 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:03:50 crc kubenswrapper[4900]: I0127 14:03:50.254234 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qvq6b" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="registry-server" probeResult="failure" output=< Jan 27 14:03:50 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:03:50 crc kubenswrapper[4900]: > Jan 27 14:04:00 crc kubenswrapper[4900]: I0127 14:04:00.258909 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qvq6b" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="registry-server" probeResult="failure" output=< Jan 27 14:04:00 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:04:00 crc kubenswrapper[4900]: > Jan 27 14:04:01 crc kubenswrapper[4900]: I0127 14:04:01.483489 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:04:01 crc kubenswrapper[4900]: E0127 14:04:01.484267 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:04:03 crc kubenswrapper[4900]: I0127 14:04:03.761848 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 14:04:03 crc kubenswrapper[4900]: I0127 14:04:03.775679 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-7dbbbb77f-fjj4n" Jan 27 14:04:10 crc kubenswrapper[4900]: I0127 14:04:10.261862 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qvq6b" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="registry-server" probeResult="failure" output=< Jan 27 14:04:10 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:04:10 crc kubenswrapper[4900]: > Jan 27 14:04:16 crc kubenswrapper[4900]: I0127 14:04:16.499916 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:04:16 crc kubenswrapper[4900]: E0127 14:04:16.505336 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:04:19 crc kubenswrapper[4900]: I0127 14:04:19.279138 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:04:19 crc kubenswrapper[4900]: I0127 14:04:19.348899 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:04:19 crc kubenswrapper[4900]: I0127 14:04:19.555574 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qvq6b"] Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.046593 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qvq6b" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="registry-server" containerID="cri-o://0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d" gracePeriod=2 Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.748991 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.837403 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szdk8\" (UniqueName: \"kubernetes.io/projected/692a81bb-019a-40bd-8d90-ffd7adb09ca8-kube-api-access-szdk8\") pod \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.837596 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-utilities\") pod \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.838131 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-catalog-content\") pod \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\" (UID: \"692a81bb-019a-40bd-8d90-ffd7adb09ca8\") " Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.838504 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-utilities" (OuterVolumeSpecName: "utilities") pod "692a81bb-019a-40bd-8d90-ffd7adb09ca8" (UID: "692a81bb-019a-40bd-8d90-ffd7adb09ca8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.839238 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.859346 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/692a81bb-019a-40bd-8d90-ffd7adb09ca8-kube-api-access-szdk8" (OuterVolumeSpecName: "kube-api-access-szdk8") pod "692a81bb-019a-40bd-8d90-ffd7adb09ca8" (UID: "692a81bb-019a-40bd-8d90-ffd7adb09ca8"). InnerVolumeSpecName "kube-api-access-szdk8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:04:21 crc kubenswrapper[4900]: I0127 14:04:21.942229 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szdk8\" (UniqueName: \"kubernetes.io/projected/692a81bb-019a-40bd-8d90-ffd7adb09ca8-kube-api-access-szdk8\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.023599 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "692a81bb-019a-40bd-8d90-ffd7adb09ca8" (UID: "692a81bb-019a-40bd-8d90-ffd7adb09ca8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.045957 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692a81bb-019a-40bd-8d90-ffd7adb09ca8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.067923 4900 generic.go:334] "Generic (PLEG): container finished" podID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerID="0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d" exitCode=0 Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.067993 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qvq6b" event={"ID":"692a81bb-019a-40bd-8d90-ffd7adb09ca8","Type":"ContainerDied","Data":"0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d"} Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.068043 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qvq6b" event={"ID":"692a81bb-019a-40bd-8d90-ffd7adb09ca8","Type":"ContainerDied","Data":"50e4633e1837285901557b4e34795fc929aa954f4cba75b6c3f50f38dd76c37b"} Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.068090 4900 scope.go:117] "RemoveContainer" containerID="0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.068330 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qvq6b" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.114973 4900 scope.go:117] "RemoveContainer" containerID="f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.138810 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qvq6b"] Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.151368 4900 scope.go:117] "RemoveContainer" containerID="9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.156464 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qvq6b"] Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.222390 4900 scope.go:117] "RemoveContainer" containerID="0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d" Jan 27 14:04:22 crc kubenswrapper[4900]: E0127 14:04:22.223202 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d\": container with ID starting with 0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d not found: ID does not exist" containerID="0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.223806 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d"} err="failed to get container status \"0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d\": rpc error: code = NotFound desc = could not find container \"0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d\": container with ID starting with 0852f1757089177a9398711de93ff1e7f02c55c9099036036032bc4bbbba519d not found: ID does not exist" Jan 27 14:04:22 crc 
kubenswrapper[4900]: I0127 14:04:22.223859 4900 scope.go:117] "RemoveContainer" containerID="f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0" Jan 27 14:04:22 crc kubenswrapper[4900]: E0127 14:04:22.224441 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0\": container with ID starting with f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0 not found: ID does not exist" containerID="f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.224503 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0"} err="failed to get container status \"f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0\": rpc error: code = NotFound desc = could not find container \"f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0\": container with ID starting with f802c8a644e2fed8bb4a199cedccc2a3f87848bd5995a7ab80d90d8d76aabad0 not found: ID does not exist" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.224550 4900 scope.go:117] "RemoveContainer" containerID="9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6" Jan 27 14:04:22 crc kubenswrapper[4900]: E0127 14:04:22.225275 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6\": container with ID starting with 9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6 not found: ID does not exist" containerID="9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.225343 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6"} err="failed to get container status \"9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6\": rpc error: code = NotFound desc = could not find container \"9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6\": container with ID starting with 9a3395ef5bdabe92e1ad0b456192ff5d0192dc5447f6754a2dcdecc8c30ef2a6 not found: ID does not exist" Jan 27 14:04:22 crc kubenswrapper[4900]: I0127 14:04:22.510785 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" path="/var/lib/kubelet/pods/692a81bb-019a-40bd-8d90-ffd7adb09ca8/volumes" Jan 27 14:04:25 crc kubenswrapper[4900]: I0127 14:04:25.131769 4900 generic.go:334] "Generic (PLEG): container finished" podID="733cb890-0c0d-4b6e-a225-4b98a5cc604c" containerID="dd3e2d59d222fd621bfdb6c938702d3f4744951215b85ad3b6cf39dca46b2e62" exitCode=0 Jan 27 14:04:25 crc kubenswrapper[4900]: I0127 14:04:25.131891 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/crc-debug-6gsxx" event={"ID":"733cb890-0c0d-4b6e-a225-4b98a5cc604c","Type":"ContainerDied","Data":"dd3e2d59d222fd621bfdb6c938702d3f4744951215b85ad3b6cf39dca46b2e62"} Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.318371 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.383875 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-47j5g/crc-debug-6gsxx"] Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.398196 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-47j5g/crc-debug-6gsxx"] Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.405538 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f4ff\" (UniqueName: \"kubernetes.io/projected/733cb890-0c0d-4b6e-a225-4b98a5cc604c-kube-api-access-7f4ff\") pod \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\" (UID: \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\") " Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.405754 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/733cb890-0c0d-4b6e-a225-4b98a5cc604c-host\") pod \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\" (UID: \"733cb890-0c0d-4b6e-a225-4b98a5cc604c\") " Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.406962 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/733cb890-0c0d-4b6e-a225-4b98a5cc604c-host" (OuterVolumeSpecName: "host") pod "733cb890-0c0d-4b6e-a225-4b98a5cc604c" (UID: "733cb890-0c0d-4b6e-a225-4b98a5cc604c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.417119 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/733cb890-0c0d-4b6e-a225-4b98a5cc604c-kube-api-access-7f4ff" (OuterVolumeSpecName: "kube-api-access-7f4ff") pod "733cb890-0c0d-4b6e-a225-4b98a5cc604c" (UID: "733cb890-0c0d-4b6e-a225-4b98a5cc604c"). InnerVolumeSpecName "kube-api-access-7f4ff". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.502435 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="733cb890-0c0d-4b6e-a225-4b98a5cc604c" path="/var/lib/kubelet/pods/733cb890-0c0d-4b6e-a225-4b98a5cc604c/volumes" Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.509856 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f4ff\" (UniqueName: \"kubernetes.io/projected/733cb890-0c0d-4b6e-a225-4b98a5cc604c-kube-api-access-7f4ff\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:26 crc kubenswrapper[4900]: I0127 14:04:26.509887 4900 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/733cb890-0c0d-4b6e-a225-4b98a5cc604c-host\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.163100 4900 scope.go:117] "RemoveContainer" containerID="dd3e2d59d222fd621bfdb6c938702d3f4744951215b85ad3b6cf39dca46b2e62" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.163624 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-6gsxx" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.622602 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-47j5g/crc-debug-mz8jn"] Jan 27 14:04:27 crc kubenswrapper[4900]: E0127 14:04:27.625489 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="733cb890-0c0d-4b6e-a225-4b98a5cc604c" containerName="container-00" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.625641 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="733cb890-0c0d-4b6e-a225-4b98a5cc604c" containerName="container-00" Jan 27 14:04:27 crc kubenswrapper[4900]: E0127 14:04:27.625805 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="extract-utilities" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.625973 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="extract-utilities" Jan 27 14:04:27 crc kubenswrapper[4900]: E0127 14:04:27.626119 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="registry-server" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.627310 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="registry-server" Jan 27 14:04:27 crc kubenswrapper[4900]: E0127 14:04:27.627743 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="extract-content" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.627873 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="extract-content" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.628987 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="733cb890-0c0d-4b6e-a225-4b98a5cc604c" containerName="container-00" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.629226 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="692a81bb-019a-40bd-8d90-ffd7adb09ca8" containerName="registry-server" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.631257 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.761151 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4c4x\" (UniqueName: \"kubernetes.io/projected/f7dce65a-ad91-4d5e-b1dd-24570daf4776-kube-api-access-x4c4x\") pod \"crc-debug-mz8jn\" (UID: \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\") " pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.761281 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f7dce65a-ad91-4d5e-b1dd-24570daf4776-host\") pod \"crc-debug-mz8jn\" (UID: \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\") " pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.864815 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4c4x\" (UniqueName: \"kubernetes.io/projected/f7dce65a-ad91-4d5e-b1dd-24570daf4776-kube-api-access-x4c4x\") pod \"crc-debug-mz8jn\" (UID: \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\") " pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.864893 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f7dce65a-ad91-4d5e-b1dd-24570daf4776-host\") pod \"crc-debug-mz8jn\" (UID: \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\") " pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.865203 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f7dce65a-ad91-4d5e-b1dd-24570daf4776-host\") pod \"crc-debug-mz8jn\" (UID: \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\") " pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.897031 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4c4x\" (UniqueName: \"kubernetes.io/projected/f7dce65a-ad91-4d5e-b1dd-24570daf4776-kube-api-access-x4c4x\") pod \"crc-debug-mz8jn\" (UID: \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\") " pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:27 crc kubenswrapper[4900]: I0127 14:04:27.960187 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:28 crc kubenswrapper[4900]: I0127 14:04:28.185681 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/crc-debug-mz8jn" event={"ID":"f7dce65a-ad91-4d5e-b1dd-24570daf4776","Type":"ContainerStarted","Data":"eaba55323f7d048f1357592e15cc989529fdd140a5922720a2d726c70c7548b1"} Jan 27 14:04:29 crc kubenswrapper[4900]: I0127 14:04:29.202994 4900 generic.go:334] "Generic (PLEG): container finished" podID="f7dce65a-ad91-4d5e-b1dd-24570daf4776" containerID="4a651343831316aec7b4dc4a45c9eccb34e826e57d6b07827b71e482971f83b1" exitCode=0 Jan 27 14:04:29 crc kubenswrapper[4900]: I0127 14:04:29.203192 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/crc-debug-mz8jn" event={"ID":"f7dce65a-ad91-4d5e-b1dd-24570daf4776","Type":"ContainerDied","Data":"4a651343831316aec7b4dc4a45c9eccb34e826e57d6b07827b71e482971f83b1"} Jan 27 14:04:30 crc kubenswrapper[4900]: I0127 14:04:30.432873 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:30 crc kubenswrapper[4900]: I0127 14:04:30.484270 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:04:30 crc kubenswrapper[4900]: E0127 14:04:30.485536 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:04:30 crc kubenswrapper[4900]: I0127 14:04:30.575718 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4c4x\" (UniqueName: \"kubernetes.io/projected/f7dce65a-ad91-4d5e-b1dd-24570daf4776-kube-api-access-x4c4x\") pod \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\" (UID: \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\") " Jan 27 14:04:30 crc kubenswrapper[4900]: I0127 14:04:30.576372 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f7dce65a-ad91-4d5e-b1dd-24570daf4776-host\") pod \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\" (UID: \"f7dce65a-ad91-4d5e-b1dd-24570daf4776\") " Jan 27 14:04:30 crc kubenswrapper[4900]: I0127 14:04:30.576459 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dce65a-ad91-4d5e-b1dd-24570daf4776-host" (OuterVolumeSpecName: "host") pod "f7dce65a-ad91-4d5e-b1dd-24570daf4776" (UID: "f7dce65a-ad91-4d5e-b1dd-24570daf4776"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 14:04:30 crc kubenswrapper[4900]: I0127 14:04:30.579684 4900 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f7dce65a-ad91-4d5e-b1dd-24570daf4776-host\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:30 crc kubenswrapper[4900]: I0127 14:04:30.599426 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7dce65a-ad91-4d5e-b1dd-24570daf4776-kube-api-access-x4c4x" (OuterVolumeSpecName: "kube-api-access-x4c4x") pod "f7dce65a-ad91-4d5e-b1dd-24570daf4776" (UID: "f7dce65a-ad91-4d5e-b1dd-24570daf4776"). InnerVolumeSpecName "kube-api-access-x4c4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:04:30 crc kubenswrapper[4900]: I0127 14:04:30.682372 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4c4x\" (UniqueName: \"kubernetes.io/projected/f7dce65a-ad91-4d5e-b1dd-24570daf4776-kube-api-access-x4c4x\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:31 crc kubenswrapper[4900]: I0127 14:04:31.238018 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/crc-debug-mz8jn" event={"ID":"f7dce65a-ad91-4d5e-b1dd-24570daf4776","Type":"ContainerDied","Data":"eaba55323f7d048f1357592e15cc989529fdd140a5922720a2d726c70c7548b1"} Jan 27 14:04:31 crc kubenswrapper[4900]: I0127 14:04:31.238110 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eaba55323f7d048f1357592e15cc989529fdd140a5922720a2d726c70c7548b1" Jan 27 14:04:31 crc kubenswrapper[4900]: I0127 14:04:31.238223 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-mz8jn" Jan 27 14:04:31 crc kubenswrapper[4900]: I0127 14:04:31.447536 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-47j5g/crc-debug-mz8jn"] Jan 27 14:04:31 crc kubenswrapper[4900]: I0127 14:04:31.461646 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-47j5g/crc-debug-mz8jn"] Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.503387 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7dce65a-ad91-4d5e-b1dd-24570daf4776" path="/var/lib/kubelet/pods/f7dce65a-ad91-4d5e-b1dd-24570daf4776/volumes" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.654454 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-47j5g/crc-debug-25nvr"] Jan 27 14:04:32 crc kubenswrapper[4900]: E0127 14:04:32.655209 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7dce65a-ad91-4d5e-b1dd-24570daf4776" containerName="container-00" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.655235 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7dce65a-ad91-4d5e-b1dd-24570daf4776" containerName="container-00" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.655610 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7dce65a-ad91-4d5e-b1dd-24570daf4776" containerName="container-00" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.656769 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.682186 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42f6m\" (UniqueName: \"kubernetes.io/projected/62d0d13c-d159-407a-b9d2-35a929753dea-kube-api-access-42f6m\") pod \"crc-debug-25nvr\" (UID: \"62d0d13c-d159-407a-b9d2-35a929753dea\") " pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.682887 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/62d0d13c-d159-407a-b9d2-35a929753dea-host\") pod \"crc-debug-25nvr\" (UID: \"62d0d13c-d159-407a-b9d2-35a929753dea\") " pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.784580 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42f6m\" (UniqueName: \"kubernetes.io/projected/62d0d13c-d159-407a-b9d2-35a929753dea-kube-api-access-42f6m\") pod \"crc-debug-25nvr\" (UID: \"62d0d13c-d159-407a-b9d2-35a929753dea\") " pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.784694 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/62d0d13c-d159-407a-b9d2-35a929753dea-host\") pod \"crc-debug-25nvr\" (UID: \"62d0d13c-d159-407a-b9d2-35a929753dea\") " pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.785041 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/62d0d13c-d159-407a-b9d2-35a929753dea-host\") pod \"crc-debug-25nvr\" (UID: \"62d0d13c-d159-407a-b9d2-35a929753dea\") " pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.817210 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42f6m\" (UniqueName: \"kubernetes.io/projected/62d0d13c-d159-407a-b9d2-35a929753dea-kube-api-access-42f6m\") pod \"crc-debug-25nvr\" (UID: \"62d0d13c-d159-407a-b9d2-35a929753dea\") " pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:32 crc kubenswrapper[4900]: I0127 14:04:32.983517 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:33 crc kubenswrapper[4900]: W0127 14:04:33.026847 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62d0d13c_d159_407a_b9d2_35a929753dea.slice/crio-a53c5ec18362039bb167229e90e22f611c3d745bea8667b65b59f54410b64736 WatchSource:0}: Error finding container a53c5ec18362039bb167229e90e22f611c3d745bea8667b65b59f54410b64736: Status 404 returned error can't find the container with id a53c5ec18362039bb167229e90e22f611c3d745bea8667b65b59f54410b64736 Jan 27 14:04:33 crc kubenswrapper[4900]: I0127 14:04:33.269045 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/crc-debug-25nvr" event={"ID":"62d0d13c-d159-407a-b9d2-35a929753dea","Type":"ContainerStarted","Data":"a53c5ec18362039bb167229e90e22f611c3d745bea8667b65b59f54410b64736"} Jan 27 14:04:34 crc kubenswrapper[4900]: I0127 14:04:34.293132 4900 generic.go:334] "Generic (PLEG): container finished" podID="62d0d13c-d159-407a-b9d2-35a929753dea" containerID="70bd25f27b4eb60db2980788d7f9d222ff8b3395f94e68b6b5a28852862c1516" exitCode=0 Jan 27 14:04:34 crc kubenswrapper[4900]: I0127 14:04:34.293290 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/crc-debug-25nvr" event={"ID":"62d0d13c-d159-407a-b9d2-35a929753dea","Type":"ContainerDied","Data":"70bd25f27b4eb60db2980788d7f9d222ff8b3395f94e68b6b5a28852862c1516"} Jan 27 14:04:34 crc kubenswrapper[4900]: I0127 14:04:34.346847 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-47j5g/crc-debug-25nvr"] Jan 27 14:04:34 crc kubenswrapper[4900]: I0127 14:04:34.360313 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-47j5g/crc-debug-25nvr"] Jan 27 14:04:35 crc kubenswrapper[4900]: I0127 14:04:35.480593 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:35 crc kubenswrapper[4900]: I0127 14:04:35.596549 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42f6m\" (UniqueName: \"kubernetes.io/projected/62d0d13c-d159-407a-b9d2-35a929753dea-kube-api-access-42f6m\") pod \"62d0d13c-d159-407a-b9d2-35a929753dea\" (UID: \"62d0d13c-d159-407a-b9d2-35a929753dea\") " Jan 27 14:04:35 crc kubenswrapper[4900]: I0127 14:04:35.597160 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/62d0d13c-d159-407a-b9d2-35a929753dea-host\") pod \"62d0d13c-d159-407a-b9d2-35a929753dea\" (UID: \"62d0d13c-d159-407a-b9d2-35a929753dea\") " Jan 27 14:04:35 crc kubenswrapper[4900]: I0127 14:04:35.597235 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62d0d13c-d159-407a-b9d2-35a929753dea-host" (OuterVolumeSpecName: "host") pod "62d0d13c-d159-407a-b9d2-35a929753dea" (UID: "62d0d13c-d159-407a-b9d2-35a929753dea"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 27 14:04:35 crc kubenswrapper[4900]: I0127 14:04:35.598242 4900 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/62d0d13c-d159-407a-b9d2-35a929753dea-host\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:35 crc kubenswrapper[4900]: I0127 14:04:35.606258 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62d0d13c-d159-407a-b9d2-35a929753dea-kube-api-access-42f6m" (OuterVolumeSpecName: "kube-api-access-42f6m") pod "62d0d13c-d159-407a-b9d2-35a929753dea" (UID: "62d0d13c-d159-407a-b9d2-35a929753dea"). InnerVolumeSpecName "kube-api-access-42f6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:04:35 crc kubenswrapper[4900]: I0127 14:04:35.701503 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42f6m\" (UniqueName: \"kubernetes.io/projected/62d0d13c-d159-407a-b9d2-35a929753dea-kube-api-access-42f6m\") on node \"crc\" DevicePath \"\"" Jan 27 14:04:36 crc kubenswrapper[4900]: I0127 14:04:36.323129 4900 scope.go:117] "RemoveContainer" containerID="70bd25f27b4eb60db2980788d7f9d222ff8b3395f94e68b6b5a28852862c1516" Jan 27 14:04:36 crc kubenswrapper[4900]: I0127 14:04:36.323269 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-47j5g/crc-debug-25nvr" Jan 27 14:04:36 crc kubenswrapper[4900]: I0127 14:04:36.499713 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62d0d13c-d159-407a-b9d2-35a929753dea" path="/var/lib/kubelet/pods/62d0d13c-d159-407a-b9d2-35a929753dea/volumes" Jan 27 14:04:45 crc kubenswrapper[4900]: I0127 14:04:45.481948 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:04:45 crc kubenswrapper[4900]: E0127 14:04:45.483045 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:05:00 crc kubenswrapper[4900]: I0127 14:05:00.485190 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:05:00 crc kubenswrapper[4900]: E0127 14:05:00.486489 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:05:09 crc kubenswrapper[4900]: I0127 14:05:09.862650 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_cf8673eb-231e-4350-891d-c610bf69df5e/aodh-api/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.113102 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_cf8673eb-231e-4350-891d-c610bf69df5e/aodh-evaluator/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.138655 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_aodh-0_cf8673eb-231e-4350-891d-c610bf69df5e/aodh-notifier/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.139518 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_cf8673eb-231e-4350-891d-c610bf69df5e/aodh-listener/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.369304 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-c6f4bcb48-jgthx_2550873b-f8d1-4bfe-8155-64f7d0929058/barbican-api/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.410370 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-c6f4bcb48-jgthx_2550873b-f8d1-4bfe-8155-64f7d0929058/barbican-api-log/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.547435 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-699cfff846-mfrc2_d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c/barbican-keystone-listener/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.766453 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-699cfff846-mfrc2_d4b81d2d-c0b7-4dcf-a299-36e1f5d46e5c/barbican-keystone-listener-log/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.787910 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-869c8dd855-nf22j_e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb/barbican-worker/0.log" Jan 27 14:05:10 crc kubenswrapper[4900]: I0127 14:05:10.883876 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-869c8dd855-nf22j_e4d5c4a3-5cef-4e81-ac7a-f04c39deb8fb/barbican-worker-log/0.log" Jan 27 14:05:11 crc kubenswrapper[4900]: I0127 14:05:11.035631 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-jcs8m_53908aae-9e96-453f-91e6-f17eeb2ce37a/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:11 crc kubenswrapper[4900]: I0127 14:05:11.205266 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_780131af-30a7-406a-8ae9-b9a3a0826d1e/ceilometer-central-agent/1.log" Jan 27 14:05:11 crc kubenswrapper[4900]: I0127 14:05:11.402013 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_780131af-30a7-406a-8ae9-b9a3a0826d1e/ceilometer-central-agent/0.log" Jan 27 14:05:11 crc kubenswrapper[4900]: I0127 14:05:11.457532 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_780131af-30a7-406a-8ae9-b9a3a0826d1e/ceilometer-notification-agent/0.log" Jan 27 14:05:11 crc kubenswrapper[4900]: I0127 14:05:11.490504 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_780131af-30a7-406a-8ae9-b9a3a0826d1e/sg-core/0.log" Jan 27 14:05:11 crc kubenswrapper[4900]: I0127 14:05:11.542049 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_780131af-30a7-406a-8ae9-b9a3a0826d1e/proxy-httpd/0.log" Jan 27 14:05:11 crc kubenswrapper[4900]: I0127 14:05:11.782583 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_49ffda39-1561-49d2-a67d-ad7da16103b2/cinder-api/0.log" Jan 27 14:05:11 crc kubenswrapper[4900]: I0127 14:05:11.815800 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_49ffda39-1561-49d2-a67d-ad7da16103b2/cinder-api-log/0.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.082333 4900 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_6bf50105-b28f-4123-a6f8-75124e213fcc/cinder-scheduler/1.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.099846 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_6bf50105-b28f-4123-a6f8-75124e213fcc/cinder-scheduler/0.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.131535 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_6bf50105-b28f-4123-a6f8-75124e213fcc/probe/0.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.336374 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-ljmmr_2b37daec-8bf1-4131-aaec-f88c604eb143/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.432025 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-sjflv_2ee331bb-6045-48d3-b163-8fcffedf5a0f/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.637675 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-z6xsp_b63e2e42-d12b-451e-a055-33abd597ddcd/init/0.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.849414 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-z6xsp_b63e2e42-d12b-451e-a055-33abd597ddcd/init/0.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.931267 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-xssdp_c74db364-6bed-43d8-988a-ed979e4827cf/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:12 crc kubenswrapper[4900]: I0127 14:05:12.980402 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-z6xsp_b63e2e42-d12b-451e-a055-33abd597ddcd/dnsmasq-dns/0.log" Jan 27 14:05:13 crc kubenswrapper[4900]: I0127 14:05:13.183922 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_98607883-1bf0-41e4-a9c1-f41e3d4cf5de/glance-log/0.log" Jan 27 14:05:13 crc kubenswrapper[4900]: I0127 14:05:13.226130 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_98607883-1bf0-41e4-a9c1-f41e3d4cf5de/glance-httpd/0.log" Jan 27 14:05:13 crc kubenswrapper[4900]: I0127 14:05:13.468662 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_21602597-32ce-4d1d-8215-951fd259bc77/glance-log/0.log" Jan 27 14:05:13 crc kubenswrapper[4900]: I0127 14:05:13.494034 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_21602597-32ce-4d1d-8215-951fd259bc77/glance-httpd/0.log" Jan 27 14:05:14 crc kubenswrapper[4900]: I0127 14:05:14.353819 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-857c4f4785-hvkmw_c9bfbc62-f1f1-409e-856c-63d4ee15ea7a/heat-api/0.log" Jan 27 14:05:14 crc kubenswrapper[4900]: I0127 14:05:14.382499 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-vmxqh_8b0ea52c-09c3-4bf6-9c72-ae576a224b72/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:14 crc kubenswrapper[4900]: I0127 14:05:14.549655 4900 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-555ff797c5-bldkh_f43811ef-7c32-49e2-b59f-9b845dd80a4f/heat-engine/0.log" Jan 27 14:05:14 crc kubenswrapper[4900]: I0127 14:05:14.569563 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-5ff866d6bc-dnsvf_023f0a68-6e12-4839-a916-6a08d907a415/heat-cfnapi/0.log" Jan 27 14:05:14 crc kubenswrapper[4900]: I0127 14:05:14.642981 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-v8zx2_992f1195-0ec0-44ed-8d53-6c45b556956c/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:14 crc kubenswrapper[4900]: I0127 14:05:14.977306 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29491981-mdm4z_844ec44e-58d8-4d70-8756-ebb19272cf74/keystone-cron/0.log" Jan 27 14:05:15 crc kubenswrapper[4900]: I0127 14:05:15.048195 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29492041-82dpf_d10a0144-94bd-4c3d-bca4-fe13a7ed6967/keystone-cron/0.log" Jan 27 14:05:15 crc kubenswrapper[4900]: I0127 14:05:15.314032 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cdc8bdcbf-4ltg2_cef6642f-e1e2-460a-9d5d-6d1c797cf79a/keystone-api/0.log" Jan 27 14:05:15 crc kubenswrapper[4900]: I0127 14:05:15.333936 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_49d2fe9a-7b2a-45a1-a0ed-31f0e7ab515b/kube-state-metrics/0.log" Jan 27 14:05:15 crc kubenswrapper[4900]: I0127 14:05:15.419245 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-7k6rc_3a7fbe74-19ba-4e60-9f23-2fb69c8e141d/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:15 crc kubenswrapper[4900]: I0127 14:05:15.482385 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:05:15 crc kubenswrapper[4900]: E0127 14:05:15.483611 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:05:15 crc kubenswrapper[4900]: I0127 14:05:15.616275 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-fhqzw_e228f1c7-b0a8-4843-8d1b-7dd5bd2896cd/logging-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:15 crc kubenswrapper[4900]: I0127 14:05:15.948348 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_48f40cde-5734-4689-9d38-0ebcb2099b1b/mysqld-exporter/0.log" Jan 27 14:05:16 crc kubenswrapper[4900]: I0127 14:05:16.154024 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-54df577b4f-lsr4f_d1b3591f-db1d-4d36-b162-a667b95bd5e7/neutron-httpd/0.log" Jan 27 14:05:16 crc kubenswrapper[4900]: I0127 14:05:16.290325 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-54df577b4f-lsr4f_d1b3591f-db1d-4d36-b162-a667b95bd5e7/neutron-api/0.log" Jan 27 14:05:16 crc kubenswrapper[4900]: I0127 14:05:16.305269 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-twrsl_299b7b55-c1ed-48af-9570-91937876ab32/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:17 crc kubenswrapper[4900]: I0127 14:05:17.314749 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5c2eb9fa-5015-4196-a6f3-a01d848d6c67/nova-api-log/0.log" Jan 27 14:05:17 crc kubenswrapper[4900]: I0127 14:05:17.369832 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_752097c1-958f-4f9b-868d-e8ec0136da53/nova-cell0-conductor-conductor/0.log" Jan 27 14:05:17 crc kubenswrapper[4900]: I0127 14:05:17.764963 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5c2eb9fa-5015-4196-a6f3-a01d848d6c67/nova-api-api/0.log" Jan 27 14:05:17 crc kubenswrapper[4900]: I0127 14:05:17.796361 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_8e31cf8e-3c3b-4afa-a114-99ee840e8234/nova-cell1-conductor-conductor/0.log" Jan 27 14:05:17 crc kubenswrapper[4900]: I0127 14:05:17.824571 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_1437f600-6b08-4386-b558-49c7fd39e118/nova-cell1-novncproxy-novncproxy/0.log" Jan 27 14:05:18 crc kubenswrapper[4900]: I0127 14:05:18.115739 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-7xv88_b9439553-d0f2-460f-b661-562e378a4b3c/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:18 crc kubenswrapper[4900]: I0127 14:05:18.186756 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_71a49337-f4cf-48fb-936b-3869052594cd/nova-metadata-log/0.log" Jan 27 14:05:18 crc kubenswrapper[4900]: I0127 14:05:18.791942 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_8782c194-2052-418f-9765-b895062a6fab/nova-scheduler-scheduler/0.log" Jan 27 14:05:18 crc kubenswrapper[4900]: I0127 14:05:18.836089 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_eca1d592-3310-47ed-a815-8f32bc974d9b/mysql-bootstrap/0.log" Jan 27 14:05:19 crc kubenswrapper[4900]: I0127 14:05:19.090460 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_eca1d592-3310-47ed-a815-8f32bc974d9b/mysql-bootstrap/0.log" Jan 27 14:05:19 crc kubenswrapper[4900]: I0127 14:05:19.122984 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_eca1d592-3310-47ed-a815-8f32bc974d9b/galera/0.log" Jan 27 14:05:19 crc kubenswrapper[4900]: I0127 14:05:19.154148 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_eca1d592-3310-47ed-a815-8f32bc974d9b/galera/1.log" Jan 27 14:05:19 crc kubenswrapper[4900]: I0127 14:05:19.442190 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7626ad91-9f29-4dae-969a-e23d420319ac/mysql-bootstrap/0.log" Jan 27 14:05:19 crc kubenswrapper[4900]: I0127 14:05:19.766448 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7626ad91-9f29-4dae-969a-e23d420319ac/mysql-bootstrap/0.log" Jan 27 14:05:19 crc kubenswrapper[4900]: I0127 14:05:19.786037 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7626ad91-9f29-4dae-969a-e23d420319ac/galera/0.log" Jan 27 14:05:19 crc 
kubenswrapper[4900]: I0127 14:05:19.842843 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7626ad91-9f29-4dae-969a-e23d420319ac/galera/1.log" Jan 27 14:05:20 crc kubenswrapper[4900]: I0127 14:05:20.020559 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_ded5eaa8-8d7d-4ee2-bad6-62da18024e33/openstackclient/0.log" Jan 27 14:05:20 crc kubenswrapper[4900]: I0127 14:05:20.263319 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-k9z2g_c6fc81ae-1ff8-4fcf-8a03-cf8c1f405d75/ovn-controller/0.log" Jan 27 14:05:20 crc kubenswrapper[4900]: I0127 14:05:20.449450 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-4stm8_0e98cfca-ae2e-4650-a9e8-e21f215546cb/openstack-network-exporter/0.log" Jan 27 14:05:20 crc kubenswrapper[4900]: I0127 14:05:20.688695 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t4b4c_029e8969-d08d-4909-9409-33f888c56c8c/ovsdb-server-init/0.log" Jan 27 14:05:20 crc kubenswrapper[4900]: I0127 14:05:20.906381 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t4b4c_029e8969-d08d-4909-9409-33f888c56c8c/ovs-vswitchd/0.log" Jan 27 14:05:20 crc kubenswrapper[4900]: I0127 14:05:20.941230 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_71a49337-f4cf-48fb-936b-3869052594cd/nova-metadata-metadata/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.006106 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t4b4c_029e8969-d08d-4909-9409-33f888c56c8c/ovsdb-server/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.009575 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t4b4c_029e8969-d08d-4909-9409-33f888c56c8c/ovsdb-server-init/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.267152 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-vk2zt_c14d3cd9-30a5-4852-aa71-915345161f76/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.272547 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c5578d3a-7a63-42bb-bd05-499cc28ed723/openstack-network-exporter/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.472982 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c5578d3a-7a63-42bb-bd05-499cc28ed723/ovn-northd/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.586452 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c76c04d4-a881-4504-a00f-3b227187edfa/openstack-network-exporter/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.730444 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c76c04d4-a881-4504-a00f-3b227187edfa/ovsdbserver-nb/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.842143 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_6217f66e-2295-46d6-878c-cc9457712a8c/openstack-network-exporter/0.log" Jan 27 14:05:21 crc kubenswrapper[4900]: I0127 14:05:21.982835 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_6217f66e-2295-46d6-878c-cc9457712a8c/ovsdbserver-sb/0.log" Jan 27 14:05:22 crc 
kubenswrapper[4900]: I0127 14:05:22.249592 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-74d8686b8d-zh4mj_eec75b81-8153-4685-a1bc-826a5abed42b/placement-api/0.log" Jan 27 14:05:22 crc kubenswrapper[4900]: I0127 14:05:22.371397 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3/init-config-reloader/0.log" Jan 27 14:05:22 crc kubenswrapper[4900]: I0127 14:05:22.401444 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-74d8686b8d-zh4mj_eec75b81-8153-4685-a1bc-826a5abed42b/placement-log/0.log" Jan 27 14:05:22 crc kubenswrapper[4900]: I0127 14:05:22.721705 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3/prometheus/0.log" Jan 27 14:05:22 crc kubenswrapper[4900]: I0127 14:05:22.725280 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3/config-reloader/0.log" Jan 27 14:05:22 crc kubenswrapper[4900]: I0127 14:05:22.731950 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3/init-config-reloader/0.log" Jan 27 14:05:22 crc kubenswrapper[4900]: I0127 14:05:22.768014 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_ed32ddcf-11ab-4c82-bbdb-5be752fb6ae3/thanos-sidecar/0.log" Jan 27 14:05:23 crc kubenswrapper[4900]: I0127 14:05:23.052036 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f460e552-e35d-44f9-9041-82280a4a840e/setup-container/0.log" Jan 27 14:05:23 crc kubenswrapper[4900]: I0127 14:05:23.271313 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f460e552-e35d-44f9-9041-82280a4a840e/setup-container/0.log" Jan 27 14:05:23 crc kubenswrapper[4900]: I0127 14:05:23.347856 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d90fa5d6-40e8-4d00-a517-259b0b16f186/setup-container/0.log" Jan 27 14:05:23 crc kubenswrapper[4900]: I0127 14:05:23.348484 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f460e552-e35d-44f9-9041-82280a4a840e/rabbitmq/0.log" Jan 27 14:05:23 crc kubenswrapper[4900]: I0127 14:05:23.729144 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d90fa5d6-40e8-4d00-a517-259b0b16f186/setup-container/0.log" Jan 27 14:05:23 crc kubenswrapper[4900]: I0127 14:05:23.833798 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_d90fa5d6-40e8-4d00-a517-259b0b16f186/rabbitmq/0.log" Jan 27 14:05:23 crc kubenswrapper[4900]: I0127 14:05:23.859302 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc/setup-container/0.log" Jan 27 14:05:24 crc kubenswrapper[4900]: I0127 14:05:24.126294 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc/setup-container/0.log" Jan 27 14:05:24 crc kubenswrapper[4900]: I0127 14:05:24.241714 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_b7437170-ba39-40c1-a876-4860f350d1e6/setup-container/0.log" Jan 27 14:05:24 crc kubenswrapper[4900]: I0127 14:05:24.251490 
4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-1_b4992f59-80b7-46cd-b0a3-f7cc47f5a1bc/rabbitmq/0.log" Jan 27 14:05:24 crc kubenswrapper[4900]: I0127 14:05:24.566186 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_b7437170-ba39-40c1-a876-4860f350d1e6/setup-container/0.log" Jan 27 14:05:24 crc kubenswrapper[4900]: I0127 14:05:24.580754 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-vlmrv_07b0138c-ca14-45fe-99d6-9474434a1eef/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:24 crc kubenswrapper[4900]: I0127 14:05:24.657737 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-2_b7437170-ba39-40c1-a876-4860f350d1e6/rabbitmq/0.log" Jan 27 14:05:24 crc kubenswrapper[4900]: I0127 14:05:24.861517 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-mbhqv_1cfbfd74-58ba-4625-a563-624a0e53d3c2/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:25 crc kubenswrapper[4900]: I0127 14:05:25.000902 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-pvfh7_cb831e66-cd2f-4a9f-8a72-385490b13aa3/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:25 crc kubenswrapper[4900]: I0127 14:05:25.207907 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-wf5hw_995aa162-c9a7-47ef-8ad2-6796321e6306/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:25 crc kubenswrapper[4900]: I0127 14:05:25.322303 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-26xlc_dc1dd1b1-c791-41c0-b30b-636aa3962cc4/ssh-known-hosts-edpm-deployment/0.log" Jan 27 14:05:25 crc kubenswrapper[4900]: I0127 14:05:25.668322 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7cb9669c67-gp7qj_668f25a4-c69e-4696-849d-166f82f28d00/proxy-server/0.log" Jan 27 14:05:25 crc kubenswrapper[4900]: I0127 14:05:25.880188 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-dzlzf_33ba5ed7-2f73-4fda-94cb-568e6a8c9843/swift-ring-rebalance/0.log" Jan 27 14:05:25 crc kubenswrapper[4900]: I0127 14:05:25.885607 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7cb9669c67-gp7qj_668f25a4-c69e-4696-849d-166f82f28d00/proxy-httpd/0.log" Jan 27 14:05:26 crc kubenswrapper[4900]: I0127 14:05:26.064665 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/account-auditor/0.log" Jan 27 14:05:26 crc kubenswrapper[4900]: I0127 14:05:26.164402 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/account-reaper/0.log" Jan 27 14:05:26 crc kubenswrapper[4900]: I0127 14:05:26.266584 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/account-replicator/0.log" Jan 27 14:05:26 crc kubenswrapper[4900]: I0127 14:05:26.357255 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/account-server/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.285138 4900 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/container-auditor/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.286137 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/container-server/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.319246 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/container-updater/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.334923 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/container-replicator/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.550873 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/object-expirer/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.587201 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/object-auditor/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.680846 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/object-replicator/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.720225 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/object-server/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.787673 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/object-updater/0.log" Jan 27 14:05:27 crc kubenswrapper[4900]: I0127 14:05:27.992219 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/swift-recon-cron/0.log" Jan 27 14:05:28 crc kubenswrapper[4900]: I0127 14:05:28.007214 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_0c2f90a4-baa0-4eeb-a797-3664c306818b/rsync/0.log" Jan 27 14:05:28 crc kubenswrapper[4900]: I0127 14:05:28.271846 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-4f94g_179a88b6-3ce1-497d-9746-da99584ba03b/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:28 crc kubenswrapper[4900]: I0127 14:05:28.508721 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-qcpfl_e20161f2-b4d0-4e63-b7b7-7c359fef99a0/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:28 crc kubenswrapper[4900]: I0127 14:05:28.558920 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_e4ae9bbe-9854-4320-9415-2a894eda782e/memcached/0.log" Jan 27 14:05:28 crc kubenswrapper[4900]: I0127 14:05:28.834884 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_5bd0ef1b-2a03-4069-ab40-fa57d5720373/test-operator-logs-container/0.log" Jan 27 14:05:28 crc kubenswrapper[4900]: I0127 14:05:28.870691 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_ac88ca80-18bc-417b-8a7d-5ca2666524e3/tempest-tests-tempest-tests-runner/0.log" Jan 27 14:05:28 crc 
kubenswrapper[4900]: I0127 14:05:28.915950 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-cnh6h_8d42f237-ff22-4ec0-9897-1da911518528/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 27 14:05:30 crc kubenswrapper[4900]: I0127 14:05:30.482488 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:05:30 crc kubenswrapper[4900]: E0127 14:05:30.483388 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:05:43 crc kubenswrapper[4900]: I0127 14:05:43.482233 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:05:43 crc kubenswrapper[4900]: E0127 14:05:43.483565 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:05:55 crc kubenswrapper[4900]: I0127 14:05:55.484657 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:05:55 crc kubenswrapper[4900]: E0127 14:05:55.485966 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:06:06 crc kubenswrapper[4900]: I0127 14:06:06.352148 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-65ff799cfd-z72lx_e3cba13f-5396-4c71-8f81-d2d932baca1f/manager/0.log" Jan 27 14:06:06 crc kubenswrapper[4900]: I0127 14:06:06.572519 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw_0cccb4df-e43d-41b1-9c22-98a6d24536ac/util/0.log" Jan 27 14:06:06 crc kubenswrapper[4900]: I0127 14:06:06.741334 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw_0cccb4df-e43d-41b1-9c22-98a6d24536ac/util/0.log" Jan 27 14:06:06 crc kubenswrapper[4900]: I0127 14:06:06.789036 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw_0cccb4df-e43d-41b1-9c22-98a6d24536ac/pull/0.log" Jan 27 14:06:06 crc kubenswrapper[4900]: I0127 14:06:06.813560 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw_0cccb4df-e43d-41b1-9c22-98a6d24536ac/pull/0.log" Jan 27 14:06:07 crc kubenswrapper[4900]: I0127 14:06:07.081620 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw_0cccb4df-e43d-41b1-9c22-98a6d24536ac/extract/0.log" Jan 27 14:06:07 crc kubenswrapper[4900]: I0127 14:06:07.091907 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw_0cccb4df-e43d-41b1-9c22-98a6d24536ac/util/0.log" Jan 27 14:06:07 crc kubenswrapper[4900]: I0127 14:06:07.116568 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ca5be714a38df9a549fe388a07be5d20e731b2c24b48f00113d86a12afp8cpw_0cccb4df-e43d-41b1-9c22-98a6d24536ac/pull/0.log" Jan 27 14:06:07 crc kubenswrapper[4900]: I0127 14:06:07.387947 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-655bf9cfbb-7wh5z_1d49f3c7-1746-4b6d-b5f7-c13b2c73dcbc/manager/0.log" Jan 27 14:06:07 crc kubenswrapper[4900]: I0127 14:06:07.429301 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-77554cdc5c-jxgdh_b094071d-c368-40e6-8515-a17d0a22a868/manager/0.log" Jan 27 14:06:07 crc kubenswrapper[4900]: I0127 14:06:07.757279 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-67dd55ff59-dl6ln_e8b4a268-6430-4f23-bd93-aa62b52710a6/manager/0.log" Jan 27 14:06:08 crc kubenswrapper[4900]: I0127 14:06:08.104525 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-575ffb885b-wvhzp_5155088c-b873-4fac-b1e9-87f57c2fae68/manager/0.log" Jan 27 14:06:08 crc kubenswrapper[4900]: I0127 14:06:08.286780 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-82q5c_175105c2-dfc2-4752-bf75-a027d86dc373/manager/0.log" Jan 27 14:06:08 crc kubenswrapper[4900]: I0127 14:06:08.482900 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:06:08 crc kubenswrapper[4900]: E0127 14:06:08.483464 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:06:08 crc kubenswrapper[4900]: I0127 14:06:08.760250 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-768b776ffb-2cndf_4715fe70-acab-4dea-adde-68e1a6e8cb28/manager/0.log" Jan 27 14:06:08 crc kubenswrapper[4900]: I0127 14:06:08.842336 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7d75bc88d5-6xhcl_76f1d09b-01aa-4c81-b568-8ffb58182475/manager/0.log" Jan 27 14:06:08 crc kubenswrapper[4900]: I0127 14:06:08.909432 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-55f684fd56-ns96g_e8365f5d-b2f2-4cab-a803-e722c65ae307/manager/0.log" Jan 27 14:06:09 crc kubenswrapper[4900]: I0127 14:06:09.058456 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-849fcfbb6b-9m826_4967ec79-a9dd-438a-9cb7-b89b3af09ff5/manager/0.log" Jan 27 14:06:09 crc kubenswrapper[4900]: I0127 14:06:09.268792 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6b9fb5fdcb-4jp2r_a6dce274-9090-44fc-ac6b-6e164e5b7192/manager/0.log" Jan 27 14:06:09 crc kubenswrapper[4900]: I0127 14:06:09.558807 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7ffd8d76d4-xfh5t_5104a740-a23d-4ea4-a186-97768d490075/manager/0.log" Jan 27 14:06:09 crc kubenswrapper[4900]: I0127 14:06:09.633480 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-fbd766fb6-25hgc_a988e8ab-311d-4b6a-a75e-c49601a77d46/manager/0.log" Jan 27 14:06:09 crc kubenswrapper[4900]: I0127 14:06:09.955919 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7875d7675-j95fk_65b8356b-f64f-4cb8-94af-6b8d45448a63/manager/0.log" Jan 27 14:06:10 crc kubenswrapper[4900]: I0127 14:06:10.029173 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b85462bq5_899811c4-fce0-42df-b3e7-9b1495cad676/manager/1.log" Jan 27 14:06:10 crc kubenswrapper[4900]: I0127 14:06:10.157025 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b85462bq5_899811c4-fce0-42df-b3e7-9b1495cad676/manager/0.log" Jan 27 14:06:10 crc kubenswrapper[4900]: I0127 14:06:10.559277 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c8f46b9cc-4h24l_e08773f7-5eaf-4a76-b671-0681c02a3471/manager/1.log" Jan 27 14:06:10 crc kubenswrapper[4900]: I0127 14:06:10.569994 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-d8fd5ccf5-5h9ll_f3cc1727-3d00-43f4-92c3-5ef428297727/operator/0.log" Jan 27 14:06:10 crc kubenswrapper[4900]: I0127 14:06:10.956977 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-xvszg_147d12f6-3180-41d8-92c9-55aab763d313/registry-server/0.log" Jan 27 14:06:10 crc kubenswrapper[4900]: I0127 14:06:10.969517 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-xvszg_147d12f6-3180-41d8-92c9-55aab763d313/registry-server/1.log" Jan 27 14:06:11 crc kubenswrapper[4900]: I0127 14:06:11.241335 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-79d5ccc684-rdllj_70b6c48f-4c95-468f-a792-abe4e318948f/manager/0.log" Jan 27 14:06:11 crc kubenswrapper[4900]: I0127 14:06:11.255114 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6f75f45d54-fkq26_0c0782a0-6d83-4760-82dd-cea358647713/manager/0.log" Jan 27 14:06:11 crc kubenswrapper[4900]: I0127 14:06:11.569021 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-92ln6_a9e9714e-647d-42a9-9073-1cbd72a6b647/operator/0.log" Jan 27 14:06:11 crc kubenswrapper[4900]: I0127 14:06:11.668250 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-dkghw_5d74e7b5-7dcd-4edc-9b82-1dea0a9570a7/manager/0.log" Jan 27 14:06:12 crc kubenswrapper[4900]: I0127 14:06:12.558513 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-krlqc_5d4cc48d-12ab-458e-bf29-bc87a182f5c3/manager/0.log" Jan 27 14:06:12 crc kubenswrapper[4900]: I0127 14:06:12.610029 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-659968c8f5-zkwns_1759ba9c-7c4a-4380-81f5-e67d8e418fa1/manager/0.log" Jan 27 14:06:12 crc kubenswrapper[4900]: I0127 14:06:12.649231 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c8f46b9cc-4h24l_e08773f7-5eaf-4a76-b671-0681c02a3471/manager/0.log" Jan 27 14:06:12 crc kubenswrapper[4900]: I0127 14:06:12.708257 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7579fb95dd-5zsrz_be0258a0-aba9-4900-b507-4767b2726a69/manager/0.log" Jan 27 14:06:22 crc kubenswrapper[4900]: I0127 14:06:22.482903 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:06:22 crc kubenswrapper[4900]: E0127 14:06:22.484116 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:06:34 crc kubenswrapper[4900]: I0127 14:06:34.482809 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:06:34 crc kubenswrapper[4900]: E0127 14:06:34.483903 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:06:43 crc kubenswrapper[4900]: I0127 14:06:43.816517 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-nnzkd_f4d2ff4d-7d50-461f-8ea3-57fdd1be7214/control-plane-machine-set-operator/0.log" Jan 27 14:06:45 crc kubenswrapper[4900]: I0127 14:06:44.793527 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:06:45 crc kubenswrapper[4900]: I0127 14:06:44.794227 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" 
probeResult="failure" output="command timed out" Jan 27 14:06:45 crc kubenswrapper[4900]: I0127 14:06:45.482761 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:06:45 crc kubenswrapper[4900]: E0127 14:06:45.483619 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:06:45 crc kubenswrapper[4900]: I0127 14:06:45.969987 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wrmdd_55206b88-053e-4616-bfb0-82f5d8a2d4f9/kube-rbac-proxy/0.log" Jan 27 14:06:46 crc kubenswrapper[4900]: I0127 14:06:46.007337 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wrmdd_55206b88-053e-4616-bfb0-82f5d8a2d4f9/machine-api-operator/0.log" Jan 27 14:06:59 crc kubenswrapper[4900]: I0127 14:06:59.482528 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:06:59 crc kubenswrapper[4900]: E0127 14:06:59.483658 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:07:04 crc kubenswrapper[4900]: I0127 14:07:04.960908 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-vpcz2_149bd65b-2a7d-4d05-9dfc-6214043f664e/cert-manager-controller/0.log" Jan 27 14:07:05 crc kubenswrapper[4900]: I0127 14:07:05.312777 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-4f2kx_c35e074b-0e8e-4d1f-8d2c-5c23cf320f25/cert-manager-webhook/0.log" Jan 27 14:07:05 crc kubenswrapper[4900]: I0127 14:07:05.324213 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-zsgft_10917301-3371-4ce8-9095-dd86dd3d8d70/cert-manager-cainjector/0.log" Jan 27 14:07:10 crc kubenswrapper[4900]: I0127 14:07:10.482873 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:07:10 crc kubenswrapper[4900]: E0127 14:07:10.484231 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:07:21 crc kubenswrapper[4900]: I0127 14:07:21.481832 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:07:21 crc kubenswrapper[4900]: E0127 14:07:21.483019 4900 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:07:23 crc kubenswrapper[4900]: I0127 14:07:23.280861 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-mrgks_c4f31091-fba1-4fcb-9dd8-929b07b8fc42/nmstate-console-plugin/0.log" Jan 27 14:07:23 crc kubenswrapper[4900]: I0127 14:07:23.535452 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-wgnlg_e78c9b0a-3d34-4f6c-9c65-0ae63482fff7/nmstate-handler/0.log" Jan 27 14:07:23 crc kubenswrapper[4900]: I0127 14:07:23.603292 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-cd2qx_e2425ef6-315b-4e1c-8004-27599038a670/kube-rbac-proxy/0.log" Jan 27 14:07:23 crc kubenswrapper[4900]: I0127 14:07:23.677890 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-cd2qx_e2425ef6-315b-4e1c-8004-27599038a670/nmstate-metrics/0.log" Jan 27 14:07:23 crc kubenswrapper[4900]: I0127 14:07:23.857379 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-b9gpn_e0e01a34-8c1e-46a7-b427-7cbe320855fe/nmstate-operator/0.log" Jan 27 14:07:23 crc kubenswrapper[4900]: I0127 14:07:23.931767 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-82kkf_b1058137-9f30-4107-a5a2-1a2edf16cbce/nmstate-webhook/0.log" Jan 27 14:07:35 crc kubenswrapper[4900]: I0127 14:07:35.483658 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:07:36 crc kubenswrapper[4900]: I0127 14:07:36.208191 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"a245cd31603c7c9b66e0275ef2c47daca5fdd40177866c50f2678cd9bc819eef"} Jan 27 14:07:41 crc kubenswrapper[4900]: I0127 14:07:41.502900 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-849c99c676-jbpgt_386bc10f-9e5d-49d0-9906-e97f1796d49d/kube-rbac-proxy/0.log" Jan 27 14:07:41 crc kubenswrapper[4900]: I0127 14:07:41.552000 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-849c99c676-jbpgt_386bc10f-9e5d-49d0-9906-e97f1796d49d/manager/0.log" Jan 27 14:07:58 crc kubenswrapper[4900]: I0127 14:07:58.919100 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-76fmn_61aa23bf-0ece-4bf6-a963-542bd8b399c6/prometheus-operator/0.log" Jan 27 14:07:59 crc kubenswrapper[4900]: I0127 14:07:59.128042 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_25cea2e4-2822-42b8-bd98-6a7f99e69c75/prometheus-operator-admission-webhook/0.log" Jan 27 14:07:59 crc kubenswrapper[4900]: I0127 14:07:59.195741 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_70cde964-7f6b-42e4-83f2-87e67664e70c/prometheus-operator-admission-webhook/0.log" Jan 27 14:07:59 crc kubenswrapper[4900]: I0127 14:07:59.422343 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-p424v_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990/operator/0.log" Jan 27 14:07:59 crc kubenswrapper[4900]: I0127 14:07:59.479698 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-r84zs_d6f43148-e0ec-452e-b55f-a6bf0c4d5b37/observability-ui-dashboards/0.log" Jan 27 14:07:59 crc kubenswrapper[4900]: I0127 14:07:59.654455 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-cbtnw_b925e9c1-ac78-41d5-a783-88a95ae66df6/perses-operator/0.log" Jan 27 14:08:19 crc kubenswrapper[4900]: I0127 14:08:19.110754 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-79cf69ddc8-kx46n_f00a0f13-11d5-4ee7-9276-21722a3ce14f/cluster-logging-operator/0.log" Jan 27 14:08:19 crc kubenswrapper[4900]: I0127 14:08:19.423773 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-b68pd_3124b61f-99eb-425d-a3c8-7d69f32e1dd0/collector/0.log" Jan 27 14:08:19 crc kubenswrapper[4900]: I0127 14:08:19.542679 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_904dcfed-5ddb-4cb9-bac8-8feb64b3bab4/loki-compactor/0.log" Jan 27 14:08:19 crc kubenswrapper[4900]: I0127 14:08:19.728999 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-5f678c8dd6-k62tn_bf3e0b5e-77aa-4f51-9cca-149e20525f8f/loki-distributor/0.log" Jan 27 14:08:19 crc kubenswrapper[4900]: I0127 14:08:19.801029 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7dbfd5bb68-zmqn9_4de6c1e3-c4c6-47f9-951f-b07adc7744cf/gateway/0.log" Jan 27 14:08:19 crc kubenswrapper[4900]: I0127 14:08:19.965807 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7dbfd5bb68-zmqn9_4de6c1e3-c4c6-47f9-951f-b07adc7744cf/opa/0.log" Jan 27 14:08:20 crc kubenswrapper[4900]: I0127 14:08:20.053605 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7dbfd5bb68-zslxm_8d06d09a-f602-4b44-a4d0-2566d02321df/gateway/0.log" Jan 27 14:08:20 crc kubenswrapper[4900]: I0127 14:08:20.111335 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7dbfd5bb68-zslxm_8d06d09a-f602-4b44-a4d0-2566d02321df/opa/0.log" Jan 27 14:08:20 crc kubenswrapper[4900]: I0127 14:08:20.286946 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_7e0b7978-27f2-42e9-8116-59384da3719b/loki-index-gateway/0.log" Jan 27 14:08:20 crc kubenswrapper[4900]: I0127 14:08:20.439704 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_e780efe8-7578-4940-b01c-c199f36d6554/loki-ingester/0.log" Jan 27 14:08:20 crc kubenswrapper[4900]: I0127 14:08:20.531978 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-76788598db-rg7hv_f1cfe76c-2aba-4da6-a7a7-fa01e883cb60/loki-querier/0.log" Jan 27 14:08:20 crc kubenswrapper[4900]: I0127 
14:08:20.693507 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-69d9546745-mlj7c_62fdb605-a4e3-443d-9887-1ebc8218908f/loki-query-frontend/0.log" Jan 27 14:08:38 crc kubenswrapper[4900]: I0127 14:08:38.165273 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-7dbg4_b047f3e7-1d76-487b-96a3-ff81b159ae95/kube-rbac-proxy/0.log" Jan 27 14:08:38 crc kubenswrapper[4900]: I0127 14:08:38.312800 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-7dbg4_b047f3e7-1d76-487b-96a3-ff81b159ae95/controller/0.log" Jan 27 14:08:38 crc kubenswrapper[4900]: I0127 14:08:38.479355 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-frr-files/0.log" Jan 27 14:08:38 crc kubenswrapper[4900]: I0127 14:08:38.770356 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-reloader/0.log" Jan 27 14:08:38 crc kubenswrapper[4900]: I0127 14:08:38.800035 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-frr-files/0.log" Jan 27 14:08:38 crc kubenswrapper[4900]: I0127 14:08:38.821573 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-reloader/0.log" Jan 27 14:08:38 crc kubenswrapper[4900]: I0127 14:08:38.871644 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-metrics/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.024838 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-metrics/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.082316 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-frr-files/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.171184 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-reloader/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.210719 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-metrics/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.493641 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-frr-files/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.549416 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-metrics/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.571323 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/cp-reloader/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.588453 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/controller/1.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.846818 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/controller/0.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.945741 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/frr/1.log" Jan 27 14:08:39 crc kubenswrapper[4900]: I0127 14:08:39.955105 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/frr-metrics/0.log" Jan 27 14:08:40 crc kubenswrapper[4900]: I0127 14:08:40.121958 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/kube-rbac-proxy/0.log" Jan 27 14:08:40 crc kubenswrapper[4900]: I0127 14:08:40.250247 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/reloader/0.log" Jan 27 14:08:40 crc kubenswrapper[4900]: I0127 14:08:40.285820 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/kube-rbac-proxy-frr/0.log" Jan 27 14:08:40 crc kubenswrapper[4900]: I0127 14:08:40.548208 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-9blf4_8031fe9b-6753-4ab7-abac-fece10fd066b/frr-k8s-webhook-server/1.log" Jan 27 14:08:40 crc kubenswrapper[4900]: I0127 14:08:40.708836 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-9blf4_8031fe9b-6753-4ab7-abac-fece10fd066b/frr-k8s-webhook-server/0.log" Jan 27 14:08:41 crc kubenswrapper[4900]: I0127 14:08:41.011075 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-694495f969-v6psl_88d8052f-1988-4229-abc5-100335ed01e2/manager/0.log" Jan 27 14:08:41 crc kubenswrapper[4900]: I0127 14:08:41.170457 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-844499dc88-f72ld_0d203d65-c23c-4e25-b72b-7b5a69441b5f/webhook-server/1.log" Jan 27 14:08:41 crc kubenswrapper[4900]: I0127 14:08:41.373616 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-844499dc88-f72ld_0d203d65-c23c-4e25-b72b-7b5a69441b5f/webhook-server/0.log" Jan 27 14:08:41 crc kubenswrapper[4900]: I0127 14:08:41.759636 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-8mc5v_7513569c-d113-4de0-8d1c-734db1c14659/kube-rbac-proxy/0.log" Jan 27 14:08:42 crc kubenswrapper[4900]: I0127 14:08:42.402949 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-gnhhx_ec5f276d-77b1-4fa8-b00b-7230c546a47f/frr/0.log" Jan 27 14:08:42 crc kubenswrapper[4900]: I0127 14:08:42.422179 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-8mc5v_7513569c-d113-4de0-8d1c-734db1c14659/speaker/1.log" Jan 27 14:08:42 crc kubenswrapper[4900]: I0127 14:08:42.835320 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-8mc5v_7513569c-d113-4de0-8d1c-734db1c14659/speaker/0.log" Jan 27 14:09:01 crc kubenswrapper[4900]: I0127 14:09:01.706466 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x_71212718-968f-4f4c-84e7-83c0e34b6597/util/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 
14:09:02.021178 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x_71212718-968f-4f4c-84e7-83c0e34b6597/util/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.051110 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x_71212718-968f-4f4c-84e7-83c0e34b6597/pull/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.097035 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x_71212718-968f-4f4c-84e7-83c0e34b6597/pull/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.261842 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x_71212718-968f-4f4c-84e7-83c0e34b6597/util/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.291378 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x_71212718-968f-4f4c-84e7-83c0e34b6597/pull/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.328849 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a2fz72x_71212718-968f-4f4c-84e7-83c0e34b6597/extract/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.570279 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t_4c53630b-90cf-4691-9746-161271db745f/util/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.729649 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t_4c53630b-90cf-4691-9746-161271db745f/util/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.786323 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t_4c53630b-90cf-4691-9746-161271db745f/pull/0.log" Jan 27 14:09:02 crc kubenswrapper[4900]: I0127 14:09:02.890192 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t_4c53630b-90cf-4691-9746-161271db745f/pull/0.log" Jan 27 14:09:03 crc kubenswrapper[4900]: I0127 14:09:03.180377 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t_4c53630b-90cf-4691-9746-161271db745f/extract/0.log" Jan 27 14:09:03 crc kubenswrapper[4900]: I0127 14:09:03.180483 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t_4c53630b-90cf-4691-9746-161271db745f/pull/0.log" Jan 27 14:09:03 crc kubenswrapper[4900]: I0127 14:09:03.180587 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dchpm7t_4c53630b-90cf-4691-9746-161271db745f/util/0.log" Jan 27 14:09:03 crc kubenswrapper[4900]: I0127 14:09:03.487525 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p_fec10fd5-c022-46d6-bc89-53e69e2c0b40/util/0.log" Jan 27 14:09:03 crc kubenswrapper[4900]: I0127 14:09:03.684566 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p_fec10fd5-c022-46d6-bc89-53e69e2c0b40/util/0.log" Jan 27 14:09:03 crc kubenswrapper[4900]: I0127 14:09:03.756074 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p_fec10fd5-c022-46d6-bc89-53e69e2c0b40/pull/0.log" Jan 27 14:09:03 crc kubenswrapper[4900]: I0127 14:09:03.756149 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p_fec10fd5-c022-46d6-bc89-53e69e2c0b40/pull/0.log" Jan 27 14:09:03 crc kubenswrapper[4900]: I0127 14:09:03.965450 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p_fec10fd5-c022-46d6-bc89-53e69e2c0b40/pull/0.log" Jan 27 14:09:04 crc kubenswrapper[4900]: I0127 14:09:04.002812 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p_fec10fd5-c022-46d6-bc89-53e69e2c0b40/util/0.log" Jan 27 14:09:04 crc kubenswrapper[4900]: I0127 14:09:04.792906 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:09:04 crc kubenswrapper[4900]: I0127 14:09:04.796767 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.064008 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bdgb4p_fec10fd5-c022-46d6-bc89-53e69e2c0b40/extract/0.log" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.097842 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr_62bc015b-4274-4927-94d3-b5c2519e4a72/util/0.log" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.351665 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr_62bc015b-4274-4927-94d3-b5c2519e4a72/util/0.log" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.387760 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr_62bc015b-4274-4927-94d3-b5c2519e4a72/pull/0.log" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.456224 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr_62bc015b-4274-4927-94d3-b5c2519e4a72/pull/0.log" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.640965 4900 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr_62bc015b-4274-4927-94d3-b5c2519e4a72/extract/0.log" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.642916 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr_62bc015b-4274-4927-94d3-b5c2519e4a72/util/0.log" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.700163 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713kjgcr_62bc015b-4274-4927-94d3-b5c2519e4a72/pull/0.log" Jan 27 14:09:05 crc kubenswrapper[4900]: I0127 14:09:05.902450 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx_fa0b888e-d846-473e-a436-c1e24be0e115/util/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.101118 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx_fa0b888e-d846-473e-a436-c1e24be0e115/util/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.160798 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx_fa0b888e-d846-473e-a436-c1e24be0e115/pull/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.177542 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx_fa0b888e-d846-473e-a436-c1e24be0e115/pull/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.436246 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx_fa0b888e-d846-473e-a436-c1e24be0e115/pull/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.450892 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx_fa0b888e-d846-473e-a436-c1e24be0e115/util/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.465344 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rvpgx_fa0b888e-d846-473e-a436-c1e24be0e115/extract/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.528202 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dkmsz_d6060c2f-7323-4d7a-9278-500fae84459b/extract-utilities/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.735208 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dkmsz_d6060c2f-7323-4d7a-9278-500fae84459b/extract-utilities/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.735954 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dkmsz_d6060c2f-7323-4d7a-9278-500fae84459b/extract-content/0.log" Jan 27 14:09:06 crc kubenswrapper[4900]: I0127 14:09:06.740324 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dkmsz_d6060c2f-7323-4d7a-9278-500fae84459b/extract-content/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.152002 4900 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dkmsz_d6060c2f-7323-4d7a-9278-500fae84459b/extract-utilities/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.179177 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dkmsz_d6060c2f-7323-4d7a-9278-500fae84459b/extract-content/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.180253 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkzmp_008df9f5-f660-4c50-b9d1-adf18fa073d1/extract-utilities/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.469249 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkzmp_008df9f5-f660-4c50-b9d1-adf18fa073d1/extract-content/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.509086 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkzmp_008df9f5-f660-4c50-b9d1-adf18fa073d1/extract-utilities/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.577301 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkzmp_008df9f5-f660-4c50-b9d1-adf18fa073d1/extract-content/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.664565 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-dkmsz_d6060c2f-7323-4d7a-9278-500fae84459b/registry-server/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.787148 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkzmp_008df9f5-f660-4c50-b9d1-adf18fa073d1/extract-utilities/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.810263 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkzmp_008df9f5-f660-4c50-b9d1-adf18fa073d1/extract-content/0.log" Jan 27 14:09:08 crc kubenswrapper[4900]: I0127 14:09:08.975346 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-9vrn8_54ddde5c-b5ea-47c1-8ef5-f697d7319c6b/marketplace-operator/0.log" Jan 27 14:09:09 crc kubenswrapper[4900]: I0127 14:09:09.126144 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k2gzb_50203e3a-7094-487f-9d3b-a9467363dfaf/extract-utilities/0.log" Jan 27 14:09:09 crc kubenswrapper[4900]: I0127 14:09:09.362716 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k2gzb_50203e3a-7094-487f-9d3b-a9467363dfaf/extract-content/0.log" Jan 27 14:09:09 crc kubenswrapper[4900]: I0127 14:09:09.397617 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k2gzb_50203e3a-7094-487f-9d3b-a9467363dfaf/extract-utilities/0.log" Jan 27 14:09:09 crc kubenswrapper[4900]: I0127 14:09:09.412239 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k2gzb_50203e3a-7094-487f-9d3b-a9467363dfaf/extract-content/0.log" Jan 27 14:09:09 crc kubenswrapper[4900]: I0127 14:09:09.628718 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k2gzb_50203e3a-7094-487f-9d3b-a9467363dfaf/extract-content/0.log" Jan 27 14:09:09 crc kubenswrapper[4900]: I0127 14:09:09.699143 4900 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k2gzb_50203e3a-7094-487f-9d3b-a9467363dfaf/extract-utilities/0.log" Jan 27 14:09:09 crc kubenswrapper[4900]: I0127 14:09:09.933513 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vc5r8_c42ffb69-dba9-4ce2-8fe6-a5581776859f/extract-utilities/0.log" Jan 27 14:09:10 crc kubenswrapper[4900]: I0127 14:09:10.147499 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k2gzb_50203e3a-7094-487f-9d3b-a9467363dfaf/registry-server/0.log" Jan 27 14:09:10 crc kubenswrapper[4900]: I0127 14:09:10.181430 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vc5r8_c42ffb69-dba9-4ce2-8fe6-a5581776859f/extract-content/0.log" Jan 27 14:09:10 crc kubenswrapper[4900]: I0127 14:09:10.281922 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vc5r8_c42ffb69-dba9-4ce2-8fe6-a5581776859f/extract-utilities/0.log" Jan 27 14:09:10 crc kubenswrapper[4900]: I0127 14:09:10.292168 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vc5r8_c42ffb69-dba9-4ce2-8fe6-a5581776859f/extract-content/0.log" Jan 27 14:09:10 crc kubenswrapper[4900]: I0127 14:09:10.469707 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zkzmp_008df9f5-f660-4c50-b9d1-adf18fa073d1/registry-server/0.log" Jan 27 14:09:10 crc kubenswrapper[4900]: I0127 14:09:10.558273 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vc5r8_c42ffb69-dba9-4ce2-8fe6-a5581776859f/extract-utilities/0.log" Jan 27 14:09:10 crc kubenswrapper[4900]: I0127 14:09:10.612188 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vc5r8_c42ffb69-dba9-4ce2-8fe6-a5581776859f/extract-content/0.log" Jan 27 14:09:11 crc kubenswrapper[4900]: I0127 14:09:11.678905 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vc5r8_c42ffb69-dba9-4ce2-8fe6-a5581776859f/registry-server/0.log" Jan 27 14:09:27 crc kubenswrapper[4900]: I0127 14:09:27.584023 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-76fmn_61aa23bf-0ece-4bf6-a963-542bd8b399c6/prometheus-operator/0.log" Jan 27 14:09:27 crc kubenswrapper[4900]: I0127 14:09:27.631345 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f87745447-mhmkk_25cea2e4-2822-42b8-bd98-6a7f99e69c75/prometheus-operator-admission-webhook/0.log" Jan 27 14:09:27 crc kubenswrapper[4900]: I0127 14:09:27.643537 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5f87745447-sn6js_70cde964-7f6b-42e4-83f2-87e67664e70c/prometheus-operator-admission-webhook/0.log" Jan 27 14:09:27 crc kubenswrapper[4900]: I0127 14:09:27.982099 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-p424v_6ccb6d6c-6d83-4ac9-80ae-7ab1f66fc990/operator/0.log" Jan 27 14:09:27 crc kubenswrapper[4900]: I0127 14:09:27.982543 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-cbtnw_b925e9c1-ac78-41d5-a783-88a95ae66df6/perses-operator/0.log" 
Jan 27 14:09:27 crc kubenswrapper[4900]: I0127 14:09:27.990188 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-66cbf594b5-r84zs_d6f43148-e0ec-452e-b55f-a6bf0c4d5b37/observability-ui-dashboards/0.log" Jan 27 14:09:45 crc kubenswrapper[4900]: I0127 14:09:45.412306 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-849c99c676-jbpgt_386bc10f-9e5d-49d0-9906-e97f1796d49d/manager/0.log" Jan 27 14:09:45 crc kubenswrapper[4900]: I0127 14:09:45.423358 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-849c99c676-jbpgt_386bc10f-9e5d-49d0-9906-e97f1796d49d/kube-rbac-proxy/0.log" Jan 27 14:09:52 crc kubenswrapper[4900]: I0127 14:09:52.372557 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 14:09:52 crc kubenswrapper[4900]: I0127 14:09:52.373487 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:10:22 crc kubenswrapper[4900]: I0127 14:10:22.377538 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 14:10:22 crc kubenswrapper[4900]: I0127 14:10:22.380310 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:10:50 crc kubenswrapper[4900]: I0127 14:10:50.536955 4900 scope.go:117] "RemoveContainer" containerID="4a651343831316aec7b4dc4a45c9eccb34e826e57d6b07827b71e482971f83b1" Jan 27 14:10:52 crc kubenswrapper[4900]: I0127 14:10:52.372866 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 14:10:52 crc kubenswrapper[4900]: I0127 14:10:52.373361 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:10:52 crc kubenswrapper[4900]: I0127 14:10:52.373617 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 14:10:52 crc kubenswrapper[4900]: I0127 14:10:52.378900 4900 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a245cd31603c7c9b66e0275ef2c47daca5fdd40177866c50f2678cd9bc819eef"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 14:10:52 crc kubenswrapper[4900]: I0127 14:10:52.379035 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://a245cd31603c7c9b66e0275ef2c47daca5fdd40177866c50f2678cd9bc819eef" gracePeriod=600 Jan 27 14:10:53 crc kubenswrapper[4900]: I0127 14:10:53.025405 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="a245cd31603c7c9b66e0275ef2c47daca5fdd40177866c50f2678cd9bc819eef" exitCode=0 Jan 27 14:10:53 crc kubenswrapper[4900]: I0127 14:10:53.025466 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"a245cd31603c7c9b66e0275ef2c47daca5fdd40177866c50f2678cd9bc819eef"} Jan 27 14:10:53 crc kubenswrapper[4900]: I0127 14:10:53.025937 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"} Jan 27 14:10:53 crc kubenswrapper[4900]: I0127 14:10:53.025992 4900 scope.go:117] "RemoveContainer" containerID="8ba47f618f0555185e357dae71e759ee214c5078aa4a1de1e789ec99b897c245" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.064046 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cnhsb"] Jan 27 14:11:04 crc kubenswrapper[4900]: E0127 14:11:04.065307 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62d0d13c-d159-407a-b9d2-35a929753dea" containerName="container-00" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.065322 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="62d0d13c-d159-407a-b9d2-35a929753dea" containerName="container-00" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.065625 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="62d0d13c-d159-407a-b9d2-35a929753dea" containerName="container-00" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.072300 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.091189 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cnhsb"] Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.130432 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt8wr\" (UniqueName: \"kubernetes.io/projected/cea45818-df6c-437c-a04c-30e182bb237d-kube-api-access-qt8wr\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.130613 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-catalog-content\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.131006 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-utilities\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.524215 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt8wr\" (UniqueName: \"kubernetes.io/projected/cea45818-df6c-437c-a04c-30e182bb237d-kube-api-access-qt8wr\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.524438 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-catalog-content\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.524958 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-utilities\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.653455 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-catalog-content\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.654369 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-utilities\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:04 crc kubenswrapper[4900]: I0127 14:11:04.746070 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qt8wr\" (UniqueName: \"kubernetes.io/projected/cea45818-df6c-437c-a04c-30e182bb237d-kube-api-access-qt8wr\") pod \"community-operators-cnhsb\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:05 crc kubenswrapper[4900]: I0127 14:11:05.007040 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:07 crc kubenswrapper[4900]: W0127 14:11:07.028805 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcea45818_df6c_437c_a04c_30e182bb237d.slice/crio-5f2423e0fd7b4a7a0ce357664614a9dbf0c5e339514d63caebb3692ce22e60b8 WatchSource:0}: Error finding container 5f2423e0fd7b4a7a0ce357664614a9dbf0c5e339514d63caebb3692ce22e60b8: Status 404 returned error can't find the container with id 5f2423e0fd7b4a7a0ce357664614a9dbf0c5e339514d63caebb3692ce22e60b8 Jan 27 14:11:07 crc kubenswrapper[4900]: I0127 14:11:07.029043 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cnhsb"] Jan 27 14:11:07 crc kubenswrapper[4900]: I0127 14:11:07.739664 4900 generic.go:334] "Generic (PLEG): container finished" podID="cea45818-df6c-437c-a04c-30e182bb237d" containerID="86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db" exitCode=0 Jan 27 14:11:07 crc kubenswrapper[4900]: I0127 14:11:07.739803 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnhsb" event={"ID":"cea45818-df6c-437c-a04c-30e182bb237d","Type":"ContainerDied","Data":"86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db"} Jan 27 14:11:07 crc kubenswrapper[4900]: I0127 14:11:07.740114 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnhsb" event={"ID":"cea45818-df6c-437c-a04c-30e182bb237d","Type":"ContainerStarted","Data":"5f2423e0fd7b4a7a0ce357664614a9dbf0c5e339514d63caebb3692ce22e60b8"} Jan 27 14:11:07 crc kubenswrapper[4900]: I0127 14:11:07.765943 4900 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 27 14:11:09 crc kubenswrapper[4900]: I0127 14:11:09.790308 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnhsb" event={"ID":"cea45818-df6c-437c-a04c-30e182bb237d","Type":"ContainerStarted","Data":"266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301"} Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.071643 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q58h5"] Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.076747 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.090073 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q58h5"] Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.230729 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msbk6\" (UniqueName: \"kubernetes.io/projected/a2cec16b-534d-400f-b553-8384c78fdd87-kube-api-access-msbk6\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.231197 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-catalog-content\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.231611 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-utilities\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.335241 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-utilities\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.335452 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msbk6\" (UniqueName: \"kubernetes.io/projected/a2cec16b-534d-400f-b553-8384c78fdd87-kube-api-access-msbk6\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.335584 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-catalog-content\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.336072 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-utilities\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.336146 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-catalog-content\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.356652 4900 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-msbk6\" (UniqueName: \"kubernetes.io/projected/a2cec16b-534d-400f-b553-8384c78fdd87-kube-api-access-msbk6\") pod \"certified-operators-q58h5\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.437323 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.835654 4900 generic.go:334] "Generic (PLEG): container finished" podID="cea45818-df6c-437c-a04c-30e182bb237d" containerID="266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301" exitCode=0 Jan 27 14:11:12 crc kubenswrapper[4900]: I0127 14:11:12.836078 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnhsb" event={"ID":"cea45818-df6c-437c-a04c-30e182bb237d","Type":"ContainerDied","Data":"266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301"} Jan 27 14:11:13 crc kubenswrapper[4900]: I0127 14:11:13.072445 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q58h5"] Jan 27 14:11:13 crc kubenswrapper[4900]: I0127 14:11:13.872009 4900 generic.go:334] "Generic (PLEG): container finished" podID="a2cec16b-534d-400f-b553-8384c78fdd87" containerID="d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3" exitCode=0 Jan 27 14:11:13 crc kubenswrapper[4900]: I0127 14:11:13.873854 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q58h5" event={"ID":"a2cec16b-534d-400f-b553-8384c78fdd87","Type":"ContainerDied","Data":"d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3"} Jan 27 14:11:13 crc kubenswrapper[4900]: I0127 14:11:13.873906 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q58h5" event={"ID":"a2cec16b-534d-400f-b553-8384c78fdd87","Type":"ContainerStarted","Data":"6f0ccaa993c5d240edb76f394987d4575953d5b76e6be5877b1be01ebc559866"} Jan 27 14:11:14 crc kubenswrapper[4900]: I0127 14:11:14.891726 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnhsb" event={"ID":"cea45818-df6c-437c-a04c-30e182bb237d","Type":"ContainerStarted","Data":"667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf"} Jan 27 14:11:14 crc kubenswrapper[4900]: I0127 14:11:14.936425 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cnhsb" podStartSLOduration=5.125033378 podStartE2EDuration="10.936396887s" podCreationTimestamp="2026-01-27 14:11:04 +0000 UTC" firstStartedPulling="2026-01-27 14:11:07.743888149 +0000 UTC m=+6294.980916359" lastFinishedPulling="2026-01-27 14:11:13.555251658 +0000 UTC m=+6300.792279868" observedRunningTime="2026-01-27 14:11:14.915813007 +0000 UTC m=+6302.152841237" watchObservedRunningTime="2026-01-27 14:11:14.936396887 +0000 UTC m=+6302.173425097" Jan 27 14:11:15 crc kubenswrapper[4900]: I0127 14:11:15.012752 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:15 crc kubenswrapper[4900]: I0127 14:11:15.013424 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:15 crc kubenswrapper[4900]: I0127 14:11:15.908000 
4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q58h5" event={"ID":"a2cec16b-534d-400f-b553-8384c78fdd87","Type":"ContainerStarted","Data":"962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25"} Jan 27 14:11:16 crc kubenswrapper[4900]: I0127 14:11:16.267747 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-cnhsb" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="registry-server" probeResult="failure" output=< Jan 27 14:11:16 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:11:16 crc kubenswrapper[4900]: > Jan 27 14:11:18 crc kubenswrapper[4900]: I0127 14:11:18.980519 4900 generic.go:334] "Generic (PLEG): container finished" podID="a2cec16b-534d-400f-b553-8384c78fdd87" containerID="962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25" exitCode=0 Jan 27 14:11:18 crc kubenswrapper[4900]: I0127 14:11:18.980594 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q58h5" event={"ID":"a2cec16b-534d-400f-b553-8384c78fdd87","Type":"ContainerDied","Data":"962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25"} Jan 27 14:11:21 crc kubenswrapper[4900]: I0127 14:11:21.036183 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q58h5" event={"ID":"a2cec16b-534d-400f-b553-8384c78fdd87","Type":"ContainerStarted","Data":"1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7"} Jan 27 14:11:21 crc kubenswrapper[4900]: I0127 14:11:21.069145 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q58h5" podStartSLOduration=2.918892215 podStartE2EDuration="9.069113059s" podCreationTimestamp="2026-01-27 14:11:12 +0000 UTC" firstStartedPulling="2026-01-27 14:11:13.879352599 +0000 UTC m=+6301.116380809" lastFinishedPulling="2026-01-27 14:11:20.029573443 +0000 UTC m=+6307.266601653" observedRunningTime="2026-01-27 14:11:21.062231195 +0000 UTC m=+6308.299259405" watchObservedRunningTime="2026-01-27 14:11:21.069113059 +0000 UTC m=+6308.306141269" Jan 27 14:11:22 crc kubenswrapper[4900]: I0127 14:11:22.437614 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:22 crc kubenswrapper[4900]: I0127 14:11:22.438074 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:23 crc kubenswrapper[4900]: I0127 14:11:23.503215 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-q58h5" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="registry-server" probeResult="failure" output=< Jan 27 14:11:23 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:11:23 crc kubenswrapper[4900]: > Jan 27 14:11:26 crc kubenswrapper[4900]: I0127 14:11:26.079678 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-cnhsb" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="registry-server" probeResult="failure" output=< Jan 27 14:11:26 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:11:26 crc kubenswrapper[4900]: > Jan 27 14:11:34 crc kubenswrapper[4900]: I0127 14:11:34.496756 4900 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/certified-operators-q58h5" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="registry-server" probeResult="failure" output=< Jan 27 14:11:34 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:11:34 crc kubenswrapper[4900]: > Jan 27 14:11:35 crc kubenswrapper[4900]: I0127 14:11:35.074444 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:35 crc kubenswrapper[4900]: I0127 14:11:35.150006 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:35 crc kubenswrapper[4900]: I0127 14:11:35.328635 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cnhsb"] Jan 27 14:11:36 crc kubenswrapper[4900]: I0127 14:11:36.258976 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cnhsb" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="registry-server" containerID="cri-o://667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf" gracePeriod=2 Jan 27 14:11:36 crc kubenswrapper[4900]: I0127 14:11:36.916978 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:36 crc kubenswrapper[4900]: I0127 14:11:36.928945 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-catalog-content\") pod \"cea45818-df6c-437c-a04c-30e182bb237d\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " Jan 27 14:11:36 crc kubenswrapper[4900]: I0127 14:11:36.929153 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qt8wr\" (UniqueName: \"kubernetes.io/projected/cea45818-df6c-437c-a04c-30e182bb237d-kube-api-access-qt8wr\") pod \"cea45818-df6c-437c-a04c-30e182bb237d\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " Jan 27 14:11:36 crc kubenswrapper[4900]: I0127 14:11:36.929287 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-utilities\") pod \"cea45818-df6c-437c-a04c-30e182bb237d\" (UID: \"cea45818-df6c-437c-a04c-30e182bb237d\") " Jan 27 14:11:36 crc kubenswrapper[4900]: I0127 14:11:36.930004 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-utilities" (OuterVolumeSpecName: "utilities") pod "cea45818-df6c-437c-a04c-30e182bb237d" (UID: "cea45818-df6c-437c-a04c-30e182bb237d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:11:36 crc kubenswrapper[4900]: I0127 14:11:36.943794 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cea45818-df6c-437c-a04c-30e182bb237d-kube-api-access-qt8wr" (OuterVolumeSpecName: "kube-api-access-qt8wr") pod "cea45818-df6c-437c-a04c-30e182bb237d" (UID: "cea45818-df6c-437c-a04c-30e182bb237d"). InnerVolumeSpecName "kube-api-access-qt8wr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.012452 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cea45818-df6c-437c-a04c-30e182bb237d" (UID: "cea45818-df6c-437c-a04c-30e182bb237d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.033661 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.033702 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qt8wr\" (UniqueName: \"kubernetes.io/projected/cea45818-df6c-437c-a04c-30e182bb237d-kube-api-access-qt8wr\") on node \"crc\" DevicePath \"\"" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.033717 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea45818-df6c-437c-a04c-30e182bb237d-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.276901 4900 generic.go:334] "Generic (PLEG): container finished" podID="cea45818-df6c-437c-a04c-30e182bb237d" containerID="667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf" exitCode=0 Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.277007 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnhsb" event={"ID":"cea45818-df6c-437c-a04c-30e182bb237d","Type":"ContainerDied","Data":"667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf"} Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.277410 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cnhsb" event={"ID":"cea45818-df6c-437c-a04c-30e182bb237d","Type":"ContainerDied","Data":"5f2423e0fd7b4a7a0ce357664614a9dbf0c5e339514d63caebb3692ce22e60b8"} Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.277438 4900 scope.go:117] "RemoveContainer" containerID="667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.277046 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cnhsb" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.315199 4900 scope.go:117] "RemoveContainer" containerID="266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.323155 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cnhsb"] Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.374954 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cnhsb"] Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.376719 4900 scope.go:117] "RemoveContainer" containerID="86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.441777 4900 scope.go:117] "RemoveContainer" containerID="667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf" Jan 27 14:11:37 crc kubenswrapper[4900]: E0127 14:11:37.442741 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf\": container with ID starting with 667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf not found: ID does not exist" containerID="667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.442809 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf"} err="failed to get container status \"667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf\": rpc error: code = NotFound desc = could not find container \"667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf\": container with ID starting with 667c2589dbaf0cf8f7e88047838e5b0d8e2d143ca3112260f51d7a895a53d0cf not found: ID does not exist" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.442855 4900 scope.go:117] "RemoveContainer" containerID="266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301" Jan 27 14:11:37 crc kubenswrapper[4900]: E0127 14:11:37.443487 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301\": container with ID starting with 266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301 not found: ID does not exist" containerID="266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.443528 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301"} err="failed to get container status \"266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301\": rpc error: code = NotFound desc = could not find container \"266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301\": container with ID starting with 266fd1c5b2bb83688c1d54c677e18e2efb06d94b2bf293858411eebd3b85e301 not found: ID does not exist" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.443570 4900 scope.go:117] "RemoveContainer" containerID="86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db" Jan 27 14:11:37 crc kubenswrapper[4900]: E0127 14:11:37.443925 4900 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db\": container with ID starting with 86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db not found: ID does not exist" containerID="86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db" Jan 27 14:11:37 crc kubenswrapper[4900]: I0127 14:11:37.443954 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db"} err="failed to get container status \"86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db\": rpc error: code = NotFound desc = could not find container \"86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db\": container with ID starting with 86e72c48b2bb378b9183b64d4d7800cdaf635ad1e21ca7f87dcd0e1a5430d5db not found: ID does not exist" Jan 27 14:11:38 crc kubenswrapper[4900]: I0127 14:11:38.496340 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cea45818-df6c-437c-a04c-30e182bb237d" path="/var/lib/kubelet/pods/cea45818-df6c-437c-a04c-30e182bb237d/volumes" Jan 27 14:11:42 crc kubenswrapper[4900]: I0127 14:11:42.507231 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:42 crc kubenswrapper[4900]: I0127 14:11:42.568020 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:43 crc kubenswrapper[4900]: I0127 14:11:43.291712 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q58h5"] Jan 27 14:11:44 crc kubenswrapper[4900]: I0127 14:11:44.382974 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q58h5" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="registry-server" containerID="cri-o://1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7" gracePeriod=2 Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.304536 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.390653 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-catalog-content\") pod \"a2cec16b-534d-400f-b553-8384c78fdd87\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.390800 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msbk6\" (UniqueName: \"kubernetes.io/projected/a2cec16b-534d-400f-b553-8384c78fdd87-kube-api-access-msbk6\") pod \"a2cec16b-534d-400f-b553-8384c78fdd87\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.391187 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-utilities\") pod \"a2cec16b-534d-400f-b553-8384c78fdd87\" (UID: \"a2cec16b-534d-400f-b553-8384c78fdd87\") " Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.392471 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-utilities" (OuterVolumeSpecName: "utilities") pod "a2cec16b-534d-400f-b553-8384c78fdd87" (UID: "a2cec16b-534d-400f-b553-8384c78fdd87"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.401759 4900 generic.go:334] "Generic (PLEG): container finished" podID="a2cec16b-534d-400f-b553-8384c78fdd87" containerID="1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7" exitCode=0 Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.401834 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q58h5" event={"ID":"a2cec16b-534d-400f-b553-8384c78fdd87","Type":"ContainerDied","Data":"1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7"} Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.401889 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q58h5" event={"ID":"a2cec16b-534d-400f-b553-8384c78fdd87","Type":"ContainerDied","Data":"6f0ccaa993c5d240edb76f394987d4575953d5b76e6be5877b1be01ebc559866"} Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.401917 4900 scope.go:117] "RemoveContainer" containerID="1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.401916 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q58h5" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.419000 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2cec16b-534d-400f-b553-8384c78fdd87-kube-api-access-msbk6" (OuterVolumeSpecName: "kube-api-access-msbk6") pod "a2cec16b-534d-400f-b553-8384c78fdd87" (UID: "a2cec16b-534d-400f-b553-8384c78fdd87"). InnerVolumeSpecName "kube-api-access-msbk6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.467079 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2cec16b-534d-400f-b553-8384c78fdd87" (UID: "a2cec16b-534d-400f-b553-8384c78fdd87"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.495821 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.495867 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msbk6\" (UniqueName: \"kubernetes.io/projected/a2cec16b-534d-400f-b553-8384c78fdd87-kube-api-access-msbk6\") on node \"crc\" DevicePath \"\"" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.495882 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cec16b-534d-400f-b553-8384c78fdd87-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.522667 4900 scope.go:117] "RemoveContainer" containerID="962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.561602 4900 scope.go:117] "RemoveContainer" containerID="d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.614611 4900 scope.go:117] "RemoveContainer" containerID="1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7" Jan 27 14:11:45 crc kubenswrapper[4900]: E0127 14:11:45.615462 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7\": container with ID starting with 1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7 not found: ID does not exist" containerID="1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.616117 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7"} err="failed to get container status \"1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7\": rpc error: code = NotFound desc = could not find container \"1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7\": container with ID starting with 1e246e6a11861bf3c58e70e67adaa7a31a4182e75f814d168930c87beacff4a7 not found: ID does not exist" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.616222 4900 scope.go:117] "RemoveContainer" containerID="962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25" Jan 27 14:11:45 crc kubenswrapper[4900]: E0127 14:11:45.616734 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25\": container with ID starting with 962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25 not found: ID does not exist" containerID="962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25" Jan 
27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.616790 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25"} err="failed to get container status \"962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25\": rpc error: code = NotFound desc = could not find container \"962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25\": container with ID starting with 962e5ff7edf92ccd5ce26efc4d225817cfc9f2ad77181bb67e2523b55a760c25 not found: ID does not exist" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.616823 4900 scope.go:117] "RemoveContainer" containerID="d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3" Jan 27 14:11:45 crc kubenswrapper[4900]: E0127 14:11:45.617114 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3\": container with ID starting with d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3 not found: ID does not exist" containerID="d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.617144 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3"} err="failed to get container status \"d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3\": rpc error: code = NotFound desc = could not find container \"d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3\": container with ID starting with d7c29f53eff5bb1c00a20f0f9aab72a5b70c444bedda752b3ca81b049d3074e3 not found: ID does not exist" Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.747791 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q58h5"] Jan 27 14:11:45 crc kubenswrapper[4900]: I0127 14:11:45.762056 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q58h5"] Jan 27 14:11:46 crc kubenswrapper[4900]: I0127 14:11:46.515183 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" path="/var/lib/kubelet/pods/a2cec16b-534d-400f-b553-8384c78fdd87/volumes" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.603616 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6ktv4"] Jan 27 14:12:32 crc kubenswrapper[4900]: E0127 14:12:32.608378 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="extract-content" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.608762 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="extract-content" Jan 27 14:12:32 crc kubenswrapper[4900]: E0127 14:12:32.608791 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="extract-utilities" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.608799 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="extract-utilities" Jan 27 14:12:32 crc kubenswrapper[4900]: E0127 14:12:32.608836 4900 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="registry-server" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.608843 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="registry-server" Jan 27 14:12:32 crc kubenswrapper[4900]: E0127 14:12:32.608870 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="extract-content" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.608877 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="extract-content" Jan 27 14:12:32 crc kubenswrapper[4900]: E0127 14:12:32.608893 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="registry-server" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.608900 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="registry-server" Jan 27 14:12:32 crc kubenswrapper[4900]: E0127 14:12:32.608920 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="extract-utilities" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.608926 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="extract-utilities" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.609502 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2cec16b-534d-400f-b553-8384c78fdd87" containerName="registry-server" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.609525 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="cea45818-df6c-437c-a04c-30e182bb237d" containerName="registry-server" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.613263 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.638662 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ktv4"] Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.703965 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-utilities\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.704255 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-catalog-content\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.704300 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swl9t\" (UniqueName: \"kubernetes.io/projected/170839d9-ded1-4b70-aa0f-3b54243299ed-kube-api-access-swl9t\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.807303 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-catalog-content\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.807367 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swl9t\" (UniqueName: \"kubernetes.io/projected/170839d9-ded1-4b70-aa0f-3b54243299ed-kube-api-access-swl9t\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.807534 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-utilities\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.808071 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-catalog-content\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.808211 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-utilities\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.832128 4900 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-swl9t\" (UniqueName: \"kubernetes.io/projected/170839d9-ded1-4b70-aa0f-3b54243299ed-kube-api-access-swl9t\") pod \"redhat-marketplace-6ktv4\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:32 crc kubenswrapper[4900]: I0127 14:12:32.958085 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:33 crc kubenswrapper[4900]: W0127 14:12:33.517215 4900 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod170839d9_ded1_4b70_aa0f_3b54243299ed.slice/crio-23e3741c939f9b0121e514233e6cde5ddc5bc90f9747ef58a7c57abd55d6ab59 WatchSource:0}: Error finding container 23e3741c939f9b0121e514233e6cde5ddc5bc90f9747ef58a7c57abd55d6ab59: Status 404 returned error can't find the container with id 23e3741c939f9b0121e514233e6cde5ddc5bc90f9747ef58a7c57abd55d6ab59 Jan 27 14:12:33 crc kubenswrapper[4900]: I0127 14:12:33.521444 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ktv4"] Jan 27 14:12:34 crc kubenswrapper[4900]: I0127 14:12:34.169082 4900 generic.go:334] "Generic (PLEG): container finished" podID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerID="0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a" exitCode=0 Jan 27 14:12:34 crc kubenswrapper[4900]: I0127 14:12:34.169190 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ktv4" event={"ID":"170839d9-ded1-4b70-aa0f-3b54243299ed","Type":"ContainerDied","Data":"0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a"} Jan 27 14:12:34 crc kubenswrapper[4900]: I0127 14:12:34.169561 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ktv4" event={"ID":"170839d9-ded1-4b70-aa0f-3b54243299ed","Type":"ContainerStarted","Data":"23e3741c939f9b0121e514233e6cde5ddc5bc90f9747ef58a7c57abd55d6ab59"} Jan 27 14:12:36 crc kubenswrapper[4900]: I0127 14:12:36.206111 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ktv4" event={"ID":"170839d9-ded1-4b70-aa0f-3b54243299ed","Type":"ContainerStarted","Data":"4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79"} Jan 27 14:12:36 crc kubenswrapper[4900]: I0127 14:12:36.209280 4900 generic.go:334] "Generic (PLEG): container finished" podID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerID="1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b" exitCode=0 Jan 27 14:12:36 crc kubenswrapper[4900]: I0127 14:12:36.209359 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-47j5g/must-gather-whhtm" event={"ID":"ab9d673e-a635-497a-9322-cdbd3f9fd3b3","Type":"ContainerDied","Data":"1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b"} Jan 27 14:12:36 crc kubenswrapper[4900]: I0127 14:12:36.210798 4900 scope.go:117] "RemoveContainer" containerID="1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b" Jan 27 14:12:36 crc kubenswrapper[4900]: I0127 14:12:36.490290 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-47j5g_must-gather-whhtm_ab9d673e-a635-497a-9322-cdbd3f9fd3b3/gather/0.log" Jan 27 14:12:37 crc kubenswrapper[4900]: I0127 14:12:37.227490 4900 generic.go:334] "Generic (PLEG): container finished" podID="170839d9-ded1-4b70-aa0f-3b54243299ed" 
containerID="4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79" exitCode=0 Jan 27 14:12:37 crc kubenswrapper[4900]: I0127 14:12:37.227587 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ktv4" event={"ID":"170839d9-ded1-4b70-aa0f-3b54243299ed","Type":"ContainerDied","Data":"4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79"} Jan 27 14:12:38 crc kubenswrapper[4900]: I0127 14:12:38.280793 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ktv4" event={"ID":"170839d9-ded1-4b70-aa0f-3b54243299ed","Type":"ContainerStarted","Data":"543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d"} Jan 27 14:12:38 crc kubenswrapper[4900]: I0127 14:12:38.328916 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6ktv4" podStartSLOduration=2.8772953 podStartE2EDuration="6.328880468s" podCreationTimestamp="2026-01-27 14:12:32 +0000 UTC" firstStartedPulling="2026-01-27 14:12:34.171854616 +0000 UTC m=+6381.408882826" lastFinishedPulling="2026-01-27 14:12:37.623439784 +0000 UTC m=+6384.860467994" observedRunningTime="2026-01-27 14:12:38.3087217 +0000 UTC m=+6385.545749920" watchObservedRunningTime="2026-01-27 14:12:38.328880468 +0000 UTC m=+6385.565908678" Jan 27 14:12:42 crc kubenswrapper[4900]: I0127 14:12:42.958874 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:42 crc kubenswrapper[4900]: I0127 14:12:42.959682 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:43 crc kubenswrapper[4900]: I0127 14:12:43.015442 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:43 crc kubenswrapper[4900]: I0127 14:12:43.428296 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:43 crc kubenswrapper[4900]: I0127 14:12:43.508236 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ktv4"] Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.172311 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6ktv4" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerName="registry-server" containerID="cri-o://543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d" gracePeriod=2 Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.782012 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.851477 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-catalog-content\") pod \"170839d9-ded1-4b70-aa0f-3b54243299ed\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.851690 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swl9t\" (UniqueName: \"kubernetes.io/projected/170839d9-ded1-4b70-aa0f-3b54243299ed-kube-api-access-swl9t\") pod \"170839d9-ded1-4b70-aa0f-3b54243299ed\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.851769 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-utilities\") pod \"170839d9-ded1-4b70-aa0f-3b54243299ed\" (UID: \"170839d9-ded1-4b70-aa0f-3b54243299ed\") " Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.854005 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-utilities" (OuterVolumeSpecName: "utilities") pod "170839d9-ded1-4b70-aa0f-3b54243299ed" (UID: "170839d9-ded1-4b70-aa0f-3b54243299ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.861420 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/170839d9-ded1-4b70-aa0f-3b54243299ed-kube-api-access-swl9t" (OuterVolumeSpecName: "kube-api-access-swl9t") pod "170839d9-ded1-4b70-aa0f-3b54243299ed" (UID: "170839d9-ded1-4b70-aa0f-3b54243299ed"). InnerVolumeSpecName "kube-api-access-swl9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.880457 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "170839d9-ded1-4b70-aa0f-3b54243299ed" (UID: "170839d9-ded1-4b70-aa0f-3b54243299ed"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.956652 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swl9t\" (UniqueName: \"kubernetes.io/projected/170839d9-ded1-4b70-aa0f-3b54243299ed-kube-api-access-swl9t\") on node \"crc\" DevicePath \"\"" Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.956701 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 14:12:46 crc kubenswrapper[4900]: I0127 14:12:46.956711 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170839d9-ded1-4b70-aa0f-3b54243299ed-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.190325 4900 generic.go:334] "Generic (PLEG): container finished" podID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerID="543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d" exitCode=0 Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.190394 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ktv4" event={"ID":"170839d9-ded1-4b70-aa0f-3b54243299ed","Type":"ContainerDied","Data":"543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d"} Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.190403 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6ktv4" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.190435 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ktv4" event={"ID":"170839d9-ded1-4b70-aa0f-3b54243299ed","Type":"ContainerDied","Data":"23e3741c939f9b0121e514233e6cde5ddc5bc90f9747ef58a7c57abd55d6ab59"} Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.190460 4900 scope.go:117] "RemoveContainer" containerID="543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.219571 4900 scope.go:117] "RemoveContainer" containerID="4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.241743 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ktv4"] Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.252850 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ktv4"] Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.255087 4900 scope.go:117] "RemoveContainer" containerID="0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.321537 4900 scope.go:117] "RemoveContainer" containerID="543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d" Jan 27 14:12:47 crc kubenswrapper[4900]: E0127 14:12:47.322395 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d\": container with ID starting with 543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d not found: ID does not exist" containerID="543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.322452 4900 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d"} err="failed to get container status \"543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d\": rpc error: code = NotFound desc = could not find container \"543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d\": container with ID starting with 543afb42b2e3beda1d1af25e187a2d2e58fe5fe2ce62ce7d4fbb3fb17a3c1d9d not found: ID does not exist" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.322488 4900 scope.go:117] "RemoveContainer" containerID="4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79" Jan 27 14:12:47 crc kubenswrapper[4900]: E0127 14:12:47.323225 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79\": container with ID starting with 4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79 not found: ID does not exist" containerID="4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.323310 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79"} err="failed to get container status \"4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79\": rpc error: code = NotFound desc = could not find container \"4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79\": container with ID starting with 4ccc89cf9e25562103b40ae8600cf9f92c99df15ff41fded7d38bcca861bad79 not found: ID does not exist" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.323355 4900 scope.go:117] "RemoveContainer" containerID="0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a" Jan 27 14:12:47 crc kubenswrapper[4900]: E0127 14:12:47.323772 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a\": container with ID starting with 0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a not found: ID does not exist" containerID="0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a" Jan 27 14:12:47 crc kubenswrapper[4900]: I0127 14:12:47.323803 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a"} err="failed to get container status \"0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a\": rpc error: code = NotFound desc = could not find container \"0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a\": container with ID starting with 0710e7fa66ec945fe43e558e96d339703ef79a3b6216cdc7f2f97396d5cfae0a not found: ID does not exist" Jan 27 14:12:48 crc kubenswrapper[4900]: I0127 14:12:48.099791 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-47j5g/must-gather-whhtm"] Jan 27 14:12:48 crc kubenswrapper[4900]: I0127 14:12:48.100715 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-47j5g/must-gather-whhtm" podUID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerName="copy" containerID="cri-o://8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c" gracePeriod=2 Jan 27 14:12:48 crc 
kubenswrapper[4900]: I0127 14:12:48.116622 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-47j5g/must-gather-whhtm"] Jan 27 14:12:48 crc kubenswrapper[4900]: I0127 14:12:48.497968 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" path="/var/lib/kubelet/pods/170839d9-ded1-4b70-aa0f-3b54243299ed/volumes" Jan 27 14:12:48 crc kubenswrapper[4900]: I0127 14:12:48.857522 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-47j5g_must-gather-whhtm_ab9d673e-a635-497a-9322-cdbd3f9fd3b3/copy/0.log" Jan 27 14:12:48 crc kubenswrapper[4900]: I0127 14:12:48.858723 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.034532 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-must-gather-output\") pod \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\" (UID: \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\") " Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.034773 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdxsw\" (UniqueName: \"kubernetes.io/projected/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-kube-api-access-jdxsw\") pod \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\" (UID: \"ab9d673e-a635-497a-9322-cdbd3f9fd3b3\") " Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.045610 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-kube-api-access-jdxsw" (OuterVolumeSpecName: "kube-api-access-jdxsw") pod "ab9d673e-a635-497a-9322-cdbd3f9fd3b3" (UID: "ab9d673e-a635-497a-9322-cdbd3f9fd3b3"). InnerVolumeSpecName "kube-api-access-jdxsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.138850 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdxsw\" (UniqueName: \"kubernetes.io/projected/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-kube-api-access-jdxsw\") on node \"crc\" DevicePath \"\"" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.257554 4900 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-47j5g_must-gather-whhtm_ab9d673e-a635-497a-9322-cdbd3f9fd3b3/copy/0.log" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.259356 4900 generic.go:334] "Generic (PLEG): container finished" podID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerID="8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c" exitCode=143 Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.259445 4900 scope.go:117] "RemoveContainer" containerID="8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.259660 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-47j5g/must-gather-whhtm" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.278454 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "ab9d673e-a635-497a-9322-cdbd3f9fd3b3" (UID: "ab9d673e-a635-497a-9322-cdbd3f9fd3b3"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.296870 4900 scope.go:117] "RemoveContainer" containerID="1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.344783 4900 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9d673e-a635-497a-9322-cdbd3f9fd3b3-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.348498 4900 scope.go:117] "RemoveContainer" containerID="8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c" Jan 27 14:12:49 crc kubenswrapper[4900]: E0127 14:12:49.349167 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c\": container with ID starting with 8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c not found: ID does not exist" containerID="8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.349224 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c"} err="failed to get container status \"8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c\": rpc error: code = NotFound desc = could not find container \"8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c\": container with ID starting with 8aa49726bac092695f9c1bd6d6c7c7fd28b0f04c1369846195693f9bf97b173c not found: ID does not exist" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.349261 4900 scope.go:117] "RemoveContainer" containerID="1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b" Jan 27 14:12:49 crc kubenswrapper[4900]: E0127 14:12:49.349575 4900 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b\": container with ID starting with 1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b not found: ID does not exist" containerID="1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b" Jan 27 14:12:49 crc kubenswrapper[4900]: I0127 14:12:49.349607 4900 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b"} err="failed to get container status \"1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b\": rpc error: code = NotFound desc = could not find container \"1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b\": container with ID starting with 1d393d1a8374d65daf5205744e586b7e3d436659557079feb9523a3f975f969b not found: ID does not exist" Jan 27 14:12:50 crc kubenswrapper[4900]: I0127 14:12:50.504569 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" path="/var/lib/kubelet/pods/ab9d673e-a635-497a-9322-cdbd3f9fd3b3/volumes" Jan 27 14:12:52 crc kubenswrapper[4900]: I0127 14:12:52.372732 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Jan 27 14:12:52 crc kubenswrapper[4900]: I0127 14:12:52.373325 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:13:22 crc kubenswrapper[4900]: I0127 14:13:22.372395 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 14:13:22 crc kubenswrapper[4900]: I0127 14:13:22.374837 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:13:44 crc kubenswrapper[4900]: I0127 14:13:44.793498 4900 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:13:45 crc kubenswrapper[4900]: I0127 14:13:45.127494 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="eca1d592-3310-47ed-a815-8f32bc974d9b" containerName="galera" probeResult="failure" output="command timed out" Jan 27 14:13:52 crc kubenswrapper[4900]: I0127 14:13:52.372549 4900 patch_prober.go:28] interesting pod/machine-config-daemon-2pp6x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 27 14:13:52 crc kubenswrapper[4900]: I0127 14:13:52.373282 4900 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 27 14:13:52 crc kubenswrapper[4900]: I0127 14:13:52.373341 4900 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" Jan 27 14:13:52 crc kubenswrapper[4900]: I0127 14:13:52.374667 4900 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"} pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 27 14:13:52 crc kubenswrapper[4900]: I0127 14:13:52.374738 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerName="machine-config-daemon" containerID="cri-o://3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" gracePeriod=600 Jan 27 14:13:52 crc kubenswrapper[4900]: 
E0127 14:13:52.501211 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:13:53 crc kubenswrapper[4900]: I0127 14:13:53.228546 4900 generic.go:334] "Generic (PLEG): container finished" podID="2f2c6408-cc23-4b42-92ba-ef08be13637b" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" exitCode=0 Jan 27 14:13:53 crc kubenswrapper[4900]: I0127 14:13:53.228603 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerDied","Data":"3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"} Jan 27 14:13:53 crc kubenswrapper[4900]: I0127 14:13:53.228655 4900 scope.go:117] "RemoveContainer" containerID="a245cd31603c7c9b66e0275ef2c47daca5fdd40177866c50f2678cd9bc819eef" Jan 27 14:13:53 crc kubenswrapper[4900]: I0127 14:13:53.230693 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:13:53 crc kubenswrapper[4900]: E0127 14:13:53.232532 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:14:08 crc kubenswrapper[4900]: I0127 14:14:08.485443 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:14:08 crc kubenswrapper[4900]: E0127 14:14:08.486643 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:14:22 crc kubenswrapper[4900]: I0127 14:14:22.483359 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:14:22 crc kubenswrapper[4900]: E0127 14:14:22.484489 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:14:34 crc kubenswrapper[4900]: I0127 14:14:34.484217 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:14:34 crc kubenswrapper[4900]: E0127 14:14:34.485589 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:14:46 crc kubenswrapper[4900]: I0127 14:14:46.498319 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:14:46 crc kubenswrapper[4900]: E0127 14:14:46.500960 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:14:57 crc kubenswrapper[4900]: I0127 14:14:57.485493 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:14:57 crc kubenswrapper[4900]: E0127 14:14:57.488607 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.400468 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n"] Jan 27 14:15:00 crc kubenswrapper[4900]: E0127 14:15:00.402029 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerName="registry-server" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.402075 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerName="registry-server" Jan 27 14:15:00 crc kubenswrapper[4900]: E0127 14:15:00.402102 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerName="copy" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.402109 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerName="copy" Jan 27 14:15:00 crc kubenswrapper[4900]: E0127 14:15:00.402156 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerName="extract-utilities" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.402170 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerName="extract-utilities" Jan 27 14:15:00 crc kubenswrapper[4900]: E0127 14:15:00.402186 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerName="gather" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.402194 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerName="gather" Jan 27 14:15:00 crc kubenswrapper[4900]: E0127 14:15:00.402224 4900 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerName="extract-content" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.402231 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerName="extract-content" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.402523 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="170839d9-ded1-4b70-aa0f-3b54243299ed" containerName="registry-server" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.402545 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerName="copy" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.402568 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab9d673e-a635-497a-9322-cdbd3f9fd3b3" containerName="gather" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.405792 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.478435 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n"] Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.478792 4900 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.478800 4900 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.532166 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g88qw\" (UniqueName: \"kubernetes.io/projected/76e568bd-5bac-418e-aff5-154116b244c4-kube-api-access-g88qw\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.532811 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76e568bd-5bac-418e-aff5-154116b244c4-secret-volume\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.532919 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76e568bd-5bac-418e-aff5-154116b244c4-config-volume\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.637599 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76e568bd-5bac-418e-aff5-154116b244c4-config-volume\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.638491 4900 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-g88qw\" (UniqueName: \"kubernetes.io/projected/76e568bd-5bac-418e-aff5-154116b244c4-kube-api-access-g88qw\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.638574 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76e568bd-5bac-418e-aff5-154116b244c4-secret-volume\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.638806 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76e568bd-5bac-418e-aff5-154116b244c4-config-volume\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.996297 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76e568bd-5bac-418e-aff5-154116b244c4-secret-volume\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:00 crc kubenswrapper[4900]: I0127 14:15:00.996677 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g88qw\" (UniqueName: \"kubernetes.io/projected/76e568bd-5bac-418e-aff5-154116b244c4-kube-api-access-g88qw\") pod \"collect-profiles-29492055-4sn6n\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:01 crc kubenswrapper[4900]: I0127 14:15:01.059726 4900 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:01 crc kubenswrapper[4900]: I0127 14:15:01.614636 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n"] Jan 27 14:15:02 crc kubenswrapper[4900]: I0127 14:15:02.174208 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" event={"ID":"76e568bd-5bac-418e-aff5-154116b244c4","Type":"ContainerStarted","Data":"5ad70cfd511f863a63edff15f7fd524296b5ac12d6ceff5b9d38857b93d86179"} Jan 27 14:15:02 crc kubenswrapper[4900]: I0127 14:15:02.174528 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" event={"ID":"76e568bd-5bac-418e-aff5-154116b244c4","Type":"ContainerStarted","Data":"d8042a11c9f6a14df6918f247b92c28fbbba3a53b41e68e52e9395ebb77be0ee"} Jan 27 14:15:02 crc kubenswrapper[4900]: I0127 14:15:02.198464 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" podStartSLOduration=2.198437001 podStartE2EDuration="2.198437001s" podCreationTimestamp="2026-01-27 14:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-27 14:15:02.192616677 +0000 UTC m=+6529.429644907" watchObservedRunningTime="2026-01-27 14:15:02.198437001 +0000 UTC m=+6529.435465211" Jan 27 14:15:03 crc kubenswrapper[4900]: I0127 14:15:03.196167 4900 generic.go:334] "Generic (PLEG): container finished" podID="76e568bd-5bac-418e-aff5-154116b244c4" containerID="5ad70cfd511f863a63edff15f7fd524296b5ac12d6ceff5b9d38857b93d86179" exitCode=0 Jan 27 14:15:03 crc kubenswrapper[4900]: I0127 14:15:03.196269 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" event={"ID":"76e568bd-5bac-418e-aff5-154116b244c4","Type":"ContainerDied","Data":"5ad70cfd511f863a63edff15f7fd524296b5ac12d6ceff5b9d38857b93d86179"} Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.698516 4900 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.776014 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76e568bd-5bac-418e-aff5-154116b244c4-secret-volume\") pod \"76e568bd-5bac-418e-aff5-154116b244c4\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.776162 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g88qw\" (UniqueName: \"kubernetes.io/projected/76e568bd-5bac-418e-aff5-154116b244c4-kube-api-access-g88qw\") pod \"76e568bd-5bac-418e-aff5-154116b244c4\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.776346 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76e568bd-5bac-418e-aff5-154116b244c4-config-volume\") pod \"76e568bd-5bac-418e-aff5-154116b244c4\" (UID: \"76e568bd-5bac-418e-aff5-154116b244c4\") " Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.777915 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76e568bd-5bac-418e-aff5-154116b244c4-config-volume" (OuterVolumeSpecName: "config-volume") pod "76e568bd-5bac-418e-aff5-154116b244c4" (UID: "76e568bd-5bac-418e-aff5-154116b244c4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.786693 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76e568bd-5bac-418e-aff5-154116b244c4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "76e568bd-5bac-418e-aff5-154116b244c4" (UID: "76e568bd-5bac-418e-aff5-154116b244c4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.786769 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76e568bd-5bac-418e-aff5-154116b244c4-kube-api-access-g88qw" (OuterVolumeSpecName: "kube-api-access-g88qw") pod "76e568bd-5bac-418e-aff5-154116b244c4" (UID: "76e568bd-5bac-418e-aff5-154116b244c4"). InnerVolumeSpecName "kube-api-access-g88qw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.880735 4900 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/76e568bd-5bac-418e-aff5-154116b244c4-config-volume\") on node \"crc\" DevicePath \"\"" Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.880785 4900 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/76e568bd-5bac-418e-aff5-154116b244c4-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 27 14:15:04 crc kubenswrapper[4900]: I0127 14:15:04.880801 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g88qw\" (UniqueName: \"kubernetes.io/projected/76e568bd-5bac-418e-aff5-154116b244c4-kube-api-access-g88qw\") on node \"crc\" DevicePath \"\"" Jan 27 14:15:05 crc kubenswrapper[4900]: I0127 14:15:05.223312 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" event={"ID":"76e568bd-5bac-418e-aff5-154116b244c4","Type":"ContainerDied","Data":"d8042a11c9f6a14df6918f247b92c28fbbba3a53b41e68e52e9395ebb77be0ee"} Jan 27 14:15:05 crc kubenswrapper[4900]: I0127 14:15:05.223370 4900 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8042a11c9f6a14df6918f247b92c28fbbba3a53b41e68e52e9395ebb77be0ee" Jan 27 14:15:05 crc kubenswrapper[4900]: I0127 14:15:05.223672 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29492055-4sn6n" Jan 27 14:15:05 crc kubenswrapper[4900]: I0127 14:15:05.797171 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v"] Jan 27 14:15:05 crc kubenswrapper[4900]: I0127 14:15:05.809344 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29492010-rv66v"] Jan 27 14:15:06 crc kubenswrapper[4900]: I0127 14:15:06.499253 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2730bd0a-eabd-4ea7-af71-6f1fccaf1df7" path="/var/lib/kubelet/pods/2730bd0a-eabd-4ea7-af71-6f1fccaf1df7/volumes" Jan 27 14:15:09 crc kubenswrapper[4900]: I0127 14:15:09.484618 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:15:09 crc kubenswrapper[4900]: E0127 14:15:09.489728 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:15:21 crc kubenswrapper[4900]: I0127 14:15:21.483111 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:15:21 crc kubenswrapper[4900]: E0127 14:15:21.484436 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:15:33 crc kubenswrapper[4900]: I0127 14:15:33.485354 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:15:33 crc kubenswrapper[4900]: E0127 14:15:33.486651 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.129922 4900 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pvflq"] Jan 27 14:15:41 crc kubenswrapper[4900]: E0127 14:15:41.131379 4900 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76e568bd-5bac-418e-aff5-154116b244c4" containerName="collect-profiles" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.131398 4900 state_mem.go:107] "Deleted CPUSet assignment" podUID="76e568bd-5bac-418e-aff5-154116b244c4" containerName="collect-profiles" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.131825 4900 memory_manager.go:354] "RemoveStaleState removing state" podUID="76e568bd-5bac-418e-aff5-154116b244c4" containerName="collect-profiles" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.135293 4900 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.151326 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pvflq"] Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.263152 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-catalog-content\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.263894 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkdkk\" (UniqueName: \"kubernetes.io/projected/4d102393-4c6e-4522-9391-439f35db6d75-kube-api-access-gkdkk\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.263985 4900 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-utilities\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.366814 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-utilities\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc 
kubenswrapper[4900]: I0127 14:15:41.366985 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-catalog-content\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.367217 4900 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkdkk\" (UniqueName: \"kubernetes.io/projected/4d102393-4c6e-4522-9391-439f35db6d75-kube-api-access-gkdkk\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.367766 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-catalog-content\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.367896 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-utilities\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.392539 4900 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkdkk\" (UniqueName: \"kubernetes.io/projected/4d102393-4c6e-4522-9391-439f35db6d75-kube-api-access-gkdkk\") pod \"redhat-operators-pvflq\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:41 crc kubenswrapper[4900]: I0127 14:15:41.470798 4900 util.go:30] "No sandbox for pod can be found. 
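The "SyncLoop ADD/UPDATE" lines with source="api" are the kubelet consuming a pod watch from the API server. Outside the kubelet, the same event stream can be observed with a client-go watch; a minimal sketch, assuming it runs in-cluster with permission to watch pods in openshift-marketplace:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: running inside the cluster
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	w, err := client.CoreV1().Pods("openshift-marketplace").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	// Each event corresponds to what the kubelet logs as
	// "SyncLoop ADD/UPDATE/DELETE" with source="api".
	for ev := range w.ResultChan() {
		if pod, ok := ev.Object.(*corev1.Pod); ok {
			fmt.Printf("SyncLoop %s pod=%s/%s\n", ev.Type, pod.Namespace, pod.Name)
		}
	}
}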
Need to start a new one" pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:15:42 crc kubenswrapper[4900]: I0127 14:15:42.110876 4900 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pvflq"] Jan 27 14:15:42 crc kubenswrapper[4900]: I0127 14:15:42.861999 4900 generic.go:334] "Generic (PLEG): container finished" podID="4d102393-4c6e-4522-9391-439f35db6d75" containerID="b94c40b5adf64cc9a6529e9adbd8df3b314ecc9c484f8ffb6089b2c86a5d46ce" exitCode=0 Jan 27 14:15:42 crc kubenswrapper[4900]: I0127 14:15:42.862128 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvflq" event={"ID":"4d102393-4c6e-4522-9391-439f35db6d75","Type":"ContainerDied","Data":"b94c40b5adf64cc9a6529e9adbd8df3b314ecc9c484f8ffb6089b2c86a5d46ce"} Jan 27 14:15:42 crc kubenswrapper[4900]: I0127 14:15:42.862641 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvflq" event={"ID":"4d102393-4c6e-4522-9391-439f35db6d75","Type":"ContainerStarted","Data":"e341128ebd0d0ab42fb58bfdf58b8fe4577ac19df6ea1ce2d59ae8d15fc2cacf"} Jan 27 14:15:45 crc kubenswrapper[4900]: I0127 14:15:45.329303 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvflq" event={"ID":"4d102393-4c6e-4522-9391-439f35db6d75","Type":"ContainerStarted","Data":"dc9da3b03cba8c7d13cda335d74e9ca5ec0da3351d89aca4923b2244efc56684"} Jan 27 14:15:48 crc kubenswrapper[4900]: I0127 14:15:48.483352 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:15:48 crc kubenswrapper[4900]: E0127 14:15:48.485222 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:15:50 crc kubenswrapper[4900]: I0127 14:15:50.826855 4900 scope.go:117] "RemoveContainer" containerID="05e8eaad578f017059ac01ec72bd8e8fffe2a7a65db0157ee25669d2f2f9d3ab" Jan 27 14:15:51 crc kubenswrapper[4900]: I0127 14:15:51.439770 4900 generic.go:334] "Generic (PLEG): container finished" podID="4d102393-4c6e-4522-9391-439f35db6d75" containerID="dc9da3b03cba8c7d13cda335d74e9ca5ec0da3351d89aca4923b2244efc56684" exitCode=0 Jan 27 14:15:51 crc kubenswrapper[4900]: I0127 14:15:51.439860 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvflq" event={"ID":"4d102393-4c6e-4522-9391-439f35db6d75","Type":"ContainerDied","Data":"dc9da3b03cba8c7d13cda335d74e9ca5ec0da3351d89aca4923b2244efc56684"} Jan 27 14:15:52 crc kubenswrapper[4900]: I0127 14:15:52.456934 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvflq" event={"ID":"4d102393-4c6e-4522-9391-439f35db6d75","Type":"ContainerStarted","Data":"0c8200a0b19c1909c9de28595982b3b9bffe8aadca8a0d2d2d4deb8a07fd7971"} Jan 27 14:15:52 crc kubenswrapper[4900]: I0127 14:15:52.524509 4900 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pvflq" podStartSLOduration=2.288569416 podStartE2EDuration="11.524478712s" podCreationTimestamp="2026-01-27 14:15:41 +0000 UTC" firstStartedPulling="2026-01-27 
14:15:42.864868949 +0000 UTC m=+6570.101897159" lastFinishedPulling="2026-01-27 14:15:52.100778245 +0000 UTC m=+6579.337806455" observedRunningTime="2026-01-27 14:15:52.498653304 +0000 UTC m=+6579.735681514" watchObservedRunningTime="2026-01-27 14:15:52.524478712 +0000 UTC m=+6579.761506922" Jan 27 14:16:01 crc kubenswrapper[4900]: I0127 14:16:01.471248 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:16:01 crc kubenswrapper[4900]: I0127 14:16:01.472110 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:16:01 crc kubenswrapper[4900]: I0127 14:16:01.481956 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:16:01 crc kubenswrapper[4900]: E0127 14:16:01.482400 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:16:02 crc kubenswrapper[4900]: I0127 14:16:02.530386 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvflq" podUID="4d102393-4c6e-4522-9391-439f35db6d75" containerName="registry-server" probeResult="failure" output=< Jan 27 14:16:02 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:16:02 crc kubenswrapper[4900]: > Jan 27 14:16:12 crc kubenswrapper[4900]: I0127 14:16:12.482399 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:16:12 crc kubenswrapper[4900]: E0127 14:16:12.488624 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:16:12 crc kubenswrapper[4900]: I0127 14:16:12.610651 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvflq" podUID="4d102393-4c6e-4522-9391-439f35db6d75" containerName="registry-server" probeResult="failure" output=< Jan 27 14:16:12 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:16:12 crc kubenswrapper[4900]: > Jan 27 14:16:23 crc kubenswrapper[4900]: I0127 14:16:23.146642 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvflq" podUID="4d102393-4c6e-4522-9391-439f35db6d75" containerName="registry-server" probeResult="failure" output=< Jan 27 14:16:23 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:16:23 crc kubenswrapper[4900]: > Jan 27 14:16:24 crc kubenswrapper[4900]: I0127 14:16:24.484757 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:16:24 crc kubenswrapper[4900]: E0127 14:16:24.485665 4900 pod_workers.go:1301] "Error syncing pod, 
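The "Probe failed" blocks come from the registry-server's startup probe: the catalog server on gRPC port 50051 is checked with a 1s timeout, and it keeps failing (at roughly 10s intervals, consistent with a periodSeconds around 10) until the catalog content has loaded at 14:16:51. A minimal sketch of an equivalent check, assuming a plain TCP dial as a stand-in for the real grpc_health_probe exec the probe runs:

package main

import (
	"fmt"
	"net"
	"time"
)

// probeOnce is illustrative only: it checks TCP reachability of the port,
// whereas the actual probe execs grpc_health_probe against the same address.
func probeOnce(addr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return fmt.Errorf("timeout: failed to connect service %q within %s", addr, timeout)
	}
	return conn.Close()
}

func main() {
	// Keep probing on a 10s period until the server answers, as the
	// kubelet does until the startup probe finally succeeds.
	for {
		if err := probeOnce(":50051", 1*time.Second); err != nil {
			fmt.Println("Probe failed:", err)
			time.Sleep(10 * time.Second)
			continue
		}
		fmt.Println("startup probe: started") // readiness checks take over from here
		return
	}
}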
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:16:32 crc kubenswrapper[4900]: I0127 14:16:32.539101 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvflq" podUID="4d102393-4c6e-4522-9391-439f35db6d75" containerName="registry-server" probeResult="failure" output=< Jan 27 14:16:32 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:16:32 crc kubenswrapper[4900]: > Jan 27 14:16:37 crc kubenswrapper[4900]: I0127 14:16:37.483857 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:16:37 crc kubenswrapper[4900]: E0127 14:16:37.485147 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:16:42 crc kubenswrapper[4900]: I0127 14:16:42.528509 4900 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvflq" podUID="4d102393-4c6e-4522-9391-439f35db6d75" containerName="registry-server" probeResult="failure" output=< Jan 27 14:16:42 crc kubenswrapper[4900]: timeout: failed to connect service ":50051" within 1s Jan 27 14:16:42 crc kubenswrapper[4900]: > Jan 27 14:16:51 crc kubenswrapper[4900]: I0127 14:16:51.533864 4900 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:16:51 crc kubenswrapper[4900]: I0127 14:16:51.602446 4900 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:16:51 crc kubenswrapper[4900]: I0127 14:16:51.782011 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pvflq"] Jan 27 14:16:52 crc kubenswrapper[4900]: I0127 14:16:52.482788 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:16:52 crc kubenswrapper[4900]: E0127 14:16:52.483201 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:16:53 crc kubenswrapper[4900]: I0127 14:16:53.553859 4900 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pvflq" podUID="4d102393-4c6e-4522-9391-439f35db6d75" containerName="registry-server" containerID="cri-o://0c8200a0b19c1909c9de28595982b3b9bffe8aadca8a0d2d2d4deb8a07fd7971" gracePeriod=2 Jan 27 14:16:54 crc kubenswrapper[4900]: I0127 14:16:54.573218 4900 
generic.go:334] "Generic (PLEG): container finished" podID="4d102393-4c6e-4522-9391-439f35db6d75" containerID="0c8200a0b19c1909c9de28595982b3b9bffe8aadca8a0d2d2d4deb8a07fd7971" exitCode=0 Jan 27 14:16:54 crc kubenswrapper[4900]: I0127 14:16:54.573294 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvflq" event={"ID":"4d102393-4c6e-4522-9391-439f35db6d75","Type":"ContainerDied","Data":"0c8200a0b19c1909c9de28595982b3b9bffe8aadca8a0d2d2d4deb8a07fd7971"} Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.526691 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.663197 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvflq" event={"ID":"4d102393-4c6e-4522-9391-439f35db6d75","Type":"ContainerDied","Data":"e341128ebd0d0ab42fb58bfdf58b8fe4577ac19df6ea1ce2d59ae8d15fc2cacf"} Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.663265 4900 scope.go:117] "RemoveContainer" containerID="0c8200a0b19c1909c9de28595982b3b9bffe8aadca8a0d2d2d4deb8a07fd7971" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.663498 4900 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvflq" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.675236 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-catalog-content\") pod \"4d102393-4c6e-4522-9391-439f35db6d75\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.675312 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-utilities\") pod \"4d102393-4c6e-4522-9391-439f35db6d75\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.675388 4900 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkdkk\" (UniqueName: \"kubernetes.io/projected/4d102393-4c6e-4522-9391-439f35db6d75-kube-api-access-gkdkk\") pod \"4d102393-4c6e-4522-9391-439f35db6d75\" (UID: \"4d102393-4c6e-4522-9391-439f35db6d75\") " Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.676585 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-utilities" (OuterVolumeSpecName: "utilities") pod "4d102393-4c6e-4522-9391-439f35db6d75" (UID: "4d102393-4c6e-4522-9391-439f35db6d75"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.677033 4900 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-utilities\") on node \"crc\" DevicePath \"\"" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.749457 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d102393-4c6e-4522-9391-439f35db6d75-kube-api-access-gkdkk" (OuterVolumeSpecName: "kube-api-access-gkdkk") pod "4d102393-4c6e-4522-9391-439f35db6d75" (UID: "4d102393-4c6e-4522-9391-439f35db6d75"). InnerVolumeSpecName "kube-api-access-gkdkk". 
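"Killing container with a grace period ... gracePeriod=2" above means the runtime signals the container to stop and escalates to a hard kill only if it outlives the 2-second window; here registry-server exited 0 before the deadline. A minimal sketch of that escalation for a local Unix process (illustrative only, not the CRI code path):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace mirrors the shape of a grace-period shutdown: SIGTERM first,
// SIGKILL only if the process is still alive when the grace period expires.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM)

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		_ = cmd.Process.Kill() // SIGKILL, matching the runtime's escalation
		<-done
		fmt.Println("killed after grace period expired")
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 2*time.Second) // gracePeriod=2, as in the log above
}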
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.794002 4900 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkdkk\" (UniqueName: \"kubernetes.io/projected/4d102393-4c6e-4522-9391-439f35db6d75-kube-api-access-gkdkk\") on node \"crc\" DevicePath \"\"" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.798312 4900 scope.go:117] "RemoveContainer" containerID="dc9da3b03cba8c7d13cda335d74e9ca5ec0da3351d89aca4923b2244efc56684" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.873775 4900 scope.go:117] "RemoveContainer" containerID="b94c40b5adf64cc9a6529e9adbd8df3b314ecc9c484f8ffb6089b2c86a5d46ce" Jan 27 14:16:55 crc kubenswrapper[4900]: I0127 14:16:55.920613 4900 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4d102393-4c6e-4522-9391-439f35db6d75" (UID: "4d102393-4c6e-4522-9391-439f35db6d75"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 27 14:16:56 crc kubenswrapper[4900]: I0127 14:16:56.000016 4900 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d102393-4c6e-4522-9391-439f35db6d75-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 27 14:16:56 crc kubenswrapper[4900]: I0127 14:16:56.017225 4900 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pvflq"] Jan 27 14:16:56 crc kubenswrapper[4900]: I0127 14:16:56.031486 4900 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pvflq"] Jan 27 14:16:56 crc kubenswrapper[4900]: I0127 14:16:56.516310 4900 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d102393-4c6e-4522-9391-439f35db6d75" path="/var/lib/kubelet/pods/4d102393-4c6e-4522-9391-439f35db6d75/volumes" Jan 27 14:17:05 crc kubenswrapper[4900]: I0127 14:17:05.483116 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:17:05 crc kubenswrapper[4900]: E0127 14:17:05.484244 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:17:17 crc kubenswrapper[4900]: I0127 14:17:17.482694 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:17:17 crc kubenswrapper[4900]: E0127 14:17:17.483800 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b" Jan 27 14:17:28 crc kubenswrapper[4900]: I0127 14:17:28.483606 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f" Jan 27 14:17:28 crc 
Jan 27 14:17:39 crc kubenswrapper[4900]: I0127 14:17:39.483899 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"
Jan 27 14:17:39 crc kubenswrapper[4900]: E0127 14:17:39.487503 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 14:17:54 crc kubenswrapper[4900]: I0127 14:17:54.483613 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"
Jan 27 14:17:54 crc kubenswrapper[4900]: E0127 14:17:54.484993 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 14:18:05 crc kubenswrapper[4900]: I0127 14:18:05.482896 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"
Jan 27 14:18:05 crc kubenswrapper[4900]: E0127 14:18:05.484358 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 14:18:17 crc kubenswrapper[4900]: I0127 14:18:17.482877 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"
Jan 27 14:18:17 crc kubenswrapper[4900]: E0127 14:18:17.484030 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 14:18:29 crc kubenswrapper[4900]: I0127 14:18:29.482743 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"
Jan 27 14:18:29 crc kubenswrapper[4900]: E0127 14:18:29.483958 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 14:18:44 crc kubenswrapper[4900]: I0127 14:18:44.482675 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"
Jan 27 14:18:44 crc kubenswrapper[4900]: E0127 14:18:44.484358 4900 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2pp6x_openshift-machine-config-operator(2f2c6408-cc23-4b42-92ba-ef08be13637b)\"" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" podUID="2f2c6408-cc23-4b42-92ba-ef08be13637b"
Jan 27 14:18:57 crc kubenswrapper[4900]: I0127 14:18:57.482626 4900 scope.go:117] "RemoveContainer" containerID="3be8723be905aebaa9f750dac5f5496479b010e6e4c4b4982f004adcec06f20f"
Jan 27 14:18:58 crc kubenswrapper[4900]: I0127 14:18:58.437738 4900 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2pp6x" event={"ID":"2f2c6408-cc23-4b42-92ba-ef08be13637b","Type":"ContainerStarted","Data":"78af95ecadb8130f52231b4976aefc7cfbf41102f377150c5e64e87d231dc9ee"}